diff --git a/.github/lock.yml b/.github/lock.yml deleted file mode 100644 index 78f7b19b71d3..000000000000 --- a/.github/lock.yml +++ /dev/null @@ -1,2 +0,0 @@ -daysUntilLock: 180 -lockComment: false diff --git a/.github/mergeable.yml b/.github/mergeable.yml index d647dafb7ab1..187de98277b3 100644 --- a/.github/mergeable.yml +++ b/.github/mergeable.yml @@ -5,32 +5,17 @@ mergeable: - do: label must_include: regex: '^Type:' - fail: - - do: checks - status: 'failure' - payload: - title: 'Need an appropriate "Type:" label' - summary: 'Need an appropriate "Type:" label' - - when: pull_request.* - # This validator requires either the "no release notes" label OR a "Release" milestone - # to be considered successful. However, validators "pass" in mergeable only if all - # checks pass. So it is implemented in reverse. - # I.e.: !(!no_relnotes && !release_milestone) ==> no_relnotes || release_milestone - # If both validators pass, then it is considered a failure, and if either fails, it is - # considered a success. 
- validate: - - do: label - must_exclude: - regex: '^no release notes$' + - do: description + must_include: + # Allow: + # RELEASE NOTES: none (case insensitive) + # + # RELEASE NOTES: N/A (case insensitive) + # + # RELEASE NOTES: + # * + regex: '^RELEASE NOTES:\s*([Nn][Oo][Nn][Ee]|[Nn]/[Aa]|\n(\*|-)\s*.+)$' + regex_flag: 'm' - do: milestone - must_exclude: + must_include: regex: 'Release$' - pass: - - do: checks - status: 'failure' # fail on pass - payload: - title: 'Need Release milestone or "no release notes" label' - summary: 'Need Release milestone or "no release notes" label' - fail: - - do: checks - status: 'success' # pass on fail diff --git a/.github/stale.yml b/.github/stale.yml deleted file mode 100644 index 8f69dbc4fe83..000000000000 --- a/.github/stale.yml +++ /dev/null @@ -1,58 +0,0 @@ -# Configuration for probot-stale - https://github.com/probot/stale - -# Number of days of inactivity before an Issue or Pull Request becomes stale -daysUntilStale: 6 - -# Number of days of inactivity before an Issue or Pull Request with the stale label is closed. -# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. -daysUntilClose: 7 - -# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled) -onlyLabels: - - "Status: Requires Reporter Clarification" - -# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable -exemptLabels: [] - -# Set to true to ignore issues in a project (defaults to false) -exemptProjects: false - -# Set to true to ignore issues in a milestone (defaults to false) -exemptMilestones: false - -# Set to true to ignore issues with an assignee (defaults to false) -exemptAssignees: false - -# Label to use when marking as stale -staleLabel: "stale" - -# Comment to post when marking as stale. 
Set to `false` to disable -markComment: > - This issue is labeled as requiring an update from the reporter, and no update has been received - after 6 days. If no update is provided in the next 7 days, this issue will be automatically closed. - -# Comment to post when removing the stale label. -# unmarkComment: > -# Your comment here. - -# Comment to post when closing a stale Issue or Pull Request. -# closeComment: > -# Your comment here. - -# Limit the number of actions per hour, from 1-30. Default is 30 -limitPerRun: 1 - -# Limit to only `issues` or `pulls` -# only: issues - -# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': -# pulls: -# daysUntilStale: 30 -# markComment: > -# This pull request has been automatically marked as stale because it has not had -# recent activity. It will be closed if no further activity occurs. Thank you -# for your contributions. - -# issues: -# exemptLabels: -# - confirmed diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 0c3806bdc23d..9ed65e45b91d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -3,16 +3,20 @@ name: "CodeQL" on: push: branches: [ master ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ master ] schedule: - cron: '24 20 * * 3' +permissions: + contents: read + security-events: write + pull-requests: read + actions: read + jobs: analyze: name: Analyze runs-on: ubuntu-latest + timeout-minutes: 30 strategy: fail-fast: false @@ -23,9 +27,9 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v1 + uses: github/codeql-action/init@v2 with: languages: go - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml new file mode 100644 index 000000000000..5f49c7900a3d --- /dev/null +++ b/.github/workflows/lock.yml @@ -0,0 +1,20 @@ +name: 'Lock Threads' + +on: + workflow_dispatch: + schedule: + - cron: '22 1 * * *' + +permissions: + issues: write + pull-requests: write + +jobs: + lock: + runs-on: ubuntu-latest + steps: + - uses: dessant/lock-threads@v2 + with: + github-token: ${{ github.token }} + issue-lock-inactive-days: 180 + pr-lock-inactive-days: 180 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6c0748048cc9..d3dda5376c08 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -6,13 +6,16 @@ on: jobs: release: + permissions: + contents: write # to upload release asset (actions/upload-release-asset) + name: Release cmd/protoc-gen-go-grpc runs-on: ubuntu-latest if: startsWith(github.event.release.tag_name, 'cmd/protoc-gen-go-grpc/') strategy: matrix: goos: [linux, darwin, windows] - goarch: [386, amd64] + goarch: [386, amd64, arm64] exclude: - goos: darwin goarch: 386 @@ -48,7 +51,7 @@ jobs: run: | PACKAGE_NAME=protoc-gen-go-grpc.${GITHUB_REF#refs/tags/cmd/protoc-gen-go-grpc/}.${{ matrix.goos }}.${{ matrix.goarch }}.tar.gz tar -czvf $PACKAGE_NAME -C build . 
- echo ::set-output name=name::${PACKAGE_NAME} + echo "name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT - name: Upload asset uses: actions/upload-release-asset@v1 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000000..5e01a1e70c45 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,30 @@ +name: Stale bot + +on: + workflow_dispatch: + schedule: + - cron: "44 */2 * * *" + +jobs: + stale: + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v4 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + days-before-stale: 6 + days-before-close: 7 + only-labels: 'Status: Requires Reporter Clarification' + stale-issue-label: 'stale' + stale-pr-label: 'stale' + operations-per-run: 999 + stale-issue-message: > + This issue is labeled as requiring an update from the reporter, and no update has been received + after 6 days. If no update is provided in the next 7 days, this issue will be automatically closed. + stale-pr-message: > + This PR is labeled as requiring an update from the reporter, and no update has been received + after 6 days. If no update is provided in the next 7 days, this PR will be automatically closed. diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 378e2846676f..afb830852597 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -7,6 +7,9 @@ on: schedule: - cron: 0 0 * * * # daily at 00:00 +permissions: + contents: read + # Always force the use of Go modules env: GO111MODULE: on @@ -15,14 +18,17 @@ jobs: # Check generated protos match their source repos (optional for PRs). vet-proto: runs-on: ubuntu-latest + timeout-minutes: 20 + env: + VET_ONLY_PROTO: 1 steps: # Setup the environment. 
- name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: - go-version: 1.15 + go-version: '1.20' - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 # Run the vet checks. - name: vet @@ -34,77 +40,92 @@ jobs: env: VET_SKIP_PROTO: 1 runs-on: ubuntu-latest + timeout-minutes: 20 strategy: + fail-fast: false matrix: include: - type: vet - goversion: 1.15 - - type: race - goversion: 1.15 - - type: 386 - goversion: 1.15 - - type: retry - goversion: 1.15 - - type: extras - goversion: 1.15 + goversion: '1.20' + - type: tests - goversion: 1.14 + goversion: '1.20' + + - type: tests + goversion: '1.20' + testflags: -race + + - type: tests + goversion: '1.20' + goarch: 386 + + - type: tests + goversion: '1.20' + goarch: arm64 + + - type: tests + goversion: '1.19' + - type: tests - goversion: 1.13 - - type: tests111 - goversion: 1.11 # Keep until interop tests no longer require Go1.11 + goversion: '1.18' + + - type: extras + goversion: '1.20' steps: # Setup the environment. - - name: Setup GOARCH=386 - if: ${{ matrix.type == '386' }} - run: echo "GOARCH=386" >> $GITHUB_ENV - - name: Setup RETRY - if: ${{ matrix.type == 'retry' }} - run: echo "GRPC_GO_RETRY=on" >> $GITHUB_ENV + - name: Setup GOARCH + if: matrix.goarch != '' + run: echo "GOARCH=${{ matrix.goarch }}" >> $GITHUB_ENV + + - name: Setup qemu emulator + if: matrix.goarch == 'arm64' + # setup qemu-user-static emulator and register it with binfmt_misc so that aarch64 binaries + # are automatically executed using qemu. + run: docker run --rm --privileged multiarch/qemu-user-static:5.2.0-2 --reset --credential yes --persistent yes + + - name: Setup GRPC environment + if: matrix.grpcenv != '' + run: echo "${{ matrix.grpcenv }}" >> $GITHUB_ENV + - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: ${{ matrix.goversion }} + - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 # Only run vet for 'vet' runs. 
- name: Run vet.sh - if: ${{ matrix.type == 'vet' }} + if: matrix.type == 'vet' run: ./vet.sh -install && ./vet.sh - # Main tests run for everything except when testing "extras", the race - # detector and Go1.11 (where we run a reduced set of tests). + # Main tests run for everything except when testing "extras" + # (where we run a reduced set of tests). - name: Run tests - if: ${{ matrix.type != 'extras' && matrix.type != 'race' && matrix.type != 'tests111' }} + if: matrix.type == 'tests' run: | go version - go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... - - # Race detector tests - - name: Run test race - if: ${{ matrix.TYPE == 'race' }} - run: | - go version - go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + go test ${{ matrix.testflags }} -cpu 1,4 -timeout 7m google.golang.org/grpc/... + cd "${GITHUB_WORKSPACE}" + for MOD_FILE in $(find . -name 'go.mod' | grep -Ev '^\./go\.mod'); do + pushd "$(dirname ${MOD_FILE})" + go test ${{ matrix.testflags }} -cpu 1,4 -timeout 2m ./... + popd + done # Non-core gRPC tests (examples, interop, etc) - name: Run extras tests - if: ${{ matrix.TYPE == 'extras' }} + if: matrix.type == 'extras' run: | + export TERM=${TERM:-xterm} go version + echo -e "\n-- Running Examples --" examples/examples_test.sh + echo -e "\n-- Running AdvancedTLS Examples --" security/advancedtls/examples/examples_test.sh + echo -e "\n-- Running Interop Test --" interop/interop_test.sh - cd ${GITHUB_WORKSPACE}/security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... - cd ${GITHUB_WORKSPACE}/security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... 
- - # Reduced set of tests for Go 1.11 - - name: Run Go1.11 tests - if: ${{ matrix.type == 'tests111' }} - run: | - go version - tests=$(find ${GITHUB_WORKSPACE} -name '*_test.go' | xargs -n1 dirname | sort -u | sed "s:^${GITHUB_WORKSPACE}:.:" | sed "s:\/$::" | grep -v ^./security | grep -v ^./credentials/sts | grep -v ^./credentials/tls/certprovider | grep -v ^./credentials/xds | grep -v ^./xds ) - echo "Running tests for " ${tests} - go test -cpu 1,4 -timeout 7m ${tests} + echo -e "\n-- Running xDS E2E Test --" + xds/internal/test/e2e/run.sh diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 5847d94e5512..000000000000 --- a/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: go - -matrix: - include: - - go: 1.14.x - env: VET=1 GO111MODULE=on - - go: 1.14.x - env: RACE=1 GO111MODULE=on - - go: 1.14.x - env: RUN386=1 - - go: 1.14.x - env: GRPC_GO_RETRY=on - - go: 1.14.x - env: TESTEXTRAS=1 - - go: 1.13.x - env: GO111MODULE=on - - go: 1.12.x - env: GO111MODULE=on - - go: 1.11.x # Keep until interop tests no longer require Go1.11 - env: GO111MODULE=on - -go_import_path: google.golang.org/grpc - -before_install: - - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi - -install: - - try3() { eval "$*" || eval "$*" || eval "$*"; } - - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi - -script: - - set -e - - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; security/advancedtls/examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - - if [[ -n "${VET}" ]]; then ./vet.sh; fi - - if [[ -n 
"${GAE}" ]]; then make testappengine; exit 0; fi - - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi - - make test diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cd03f8c76888..608aa6e1ac5e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly. both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. +- If you are searching for features to work on, issues labeled [Status: Help + Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) + is a great place to start. These issues are well-documented and usually can be + resolved with a single pull request. + +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file + and update the year. + - The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a @@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly. - Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. -- Don't fix code style and formatting unless you are already changing that line - to address an issue. PRs with irrelevant changes won't be merged. If you do - want to fix formatting or style, do that in a separate PR. +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. 
- Unless your PR is trivial, you should expect there will be reviewer comments - that you'll need to address before merging. We expect you to be reasonably - responsive to those comments, otherwise the PR will be closed after 2-3 weeks - of inactivity. + that you'll need to address before merging. We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically close 7 days after that if we don't hear back from + you. - Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use @@ -53,9 +66,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/Documentation/anti-patterns.md b/Documentation/anti-patterns.md new file mode 100644 index 000000000000..08469fc179f7 --- /dev/null +++ b/Documentation/anti-patterns.md @@ -0,0 +1,206 @@ +## Anti-Patterns + +### Dialing in gRPC +[`grpc.Dial`](https://pkg.go.dev/google.golang.org/grpc#Dial) is a function in +the gRPC library that creates a virtual connection from the gRPC client to the +gRPC server. 
It takes a target URI (which can represent the name of a logical +backend service and could resolve to multiple actual addresses) and a list of +options, and returns a +[`ClientConn`](https://pkg.go.dev/google.golang.org/grpc#ClientConn) object that +represents the connection to the server. The `ClientConn` contains one or more +actual connections to real server backends and attempts to keep these +connections healthy by automatically reconnecting to them when they break. + +The `Dial` function can also be configured with various options to customize the +behavior of the client connection. For example, developers could use options +such as +[`WithTransportCredentials`](https://pkg.go.dev/google.golang.org/grpc#WithTransportCredentials) +to configure the transport credentials to use. + +While `Dial` is commonly referred to as a "dialing" function, it doesn't +actually perform the low-level network dialing operation like +[`net.Dial`](https://pkg.go.dev/net#Dial) would. Instead, it creates a virtual +connection from the gRPC client to the gRPC server. + +`Dial` does initiate the process of connecting to the server, but it uses the +ClientConn object to manage and maintain that connection over time. This is why +errors encountered during the initial connection are no different from those +that occur later on, and why it's important to handle errors from RPCs rather +than relying on options like +[`FailOnNonTempDialError`](https://pkg.go.dev/google.golang.org/grpc#FailOnNonTempDialError), +[`WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), and +[`WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError). +In fact, `Dial` does not always establish a connection to servers by default. +The connection behavior is determined by the load balancing policy being used. 
+For instance, an "active" load balancing policy such as Round Robin attempts to +maintain a constant connection, while the default "pick first" policy delays +connection until an RPC is executed. Instead of using the WithBlock option, which +may not be recommended in some cases, you can call the +[`ClientConn.Connect`](https://pkg.go.dev/google.golang.org/grpc#ClientConn.Connect) +method to explicitly initiate a connection. + +### Using `FailOnNonTempDialError`, `WithBlock`, and `WithReturnConnectionError` + +The gRPC API provides several options that can be used to configure the behavior +of dialing and connecting to a gRPC server. Some of these options, such as +`FailOnNonTempDialError`, `WithBlock`, and `WithReturnConnectionError`, rely on +failures at dial time. However, we strongly discourage developers from using +these options, as they can introduce race conditions and result in unreliable +and difficult-to-debug code. + +One of the most important reasons for avoiding these options, which is often +overlooked, is that connections can fail at any point in time. This means that +you need to handle RPC failures caused by connection issues, regardless of +whether a connection was never established in the first place, or if it was +created and then immediately lost. Implementing proper error handling for RPCs +is crucial for maintaining the reliability and stability of your gRPC +communication. + +### Why we discourage using `FailOnNonTempDialError`, `WithBlock`, and `WithReturnConnectionError` + +When a client attempts to connect to a gRPC server, it can encounter a variety +of errors, including network connectivity issues, server-side errors, and +incorrect usage of the gRPC API. The options `FailOnNonTempDialError`, +`WithBlock`, and `WithReturnConnectionError` are designed to handle some of +these errors, but they do so by relying on failures at dial time. 
This means +that they may not provide reliable or accurate information about the status of +the connection. + +For example, if a client uses `WithBlock` to wait for a connection to be +established, it may end up waiting indefinitely if the server is not responding. +Similarly, if a client uses `WithReturnConnectionError` to return a connection +error if dialing fails, it may miss opportunities to recover from transient +network issues that are resolved shortly after the initial dial attempt. + +## Best practices for error handling in gRPC + +Instead of relying on failures at dial time, we strongly encourage developers to +rely on errors from RPCs. When a client makes an RPC, it can receive an error +response from the server. These errors can provide valuable information about +what went wrong, including information about network issues, server-side errors, +and incorrect usage of the gRPC API. + +By handling errors from RPCs correctly, developers can write more reliable and +robust gRPC applications. Here are some best practices for error handling in +gRPC: + +- Always check for error responses from RPCs and handle them appropriately. +- Use the `status` field of the error response to determine the type of error that + occurred. +- When retrying failed RPCs, consider using the built-in retry mechanism + provided by gRPC-Go, if available, instead of manually implementing retries. + Refer to the [gRPC-Go retry example + documentation](https://github.com/grpc/grpc-go/blob/master/examples/features/retry/README.md) + for more information. +- Avoid using `FailOnNonTempDialError`, `WithBlock`, and + `WithReturnConnectionError`, as these options can introduce race conditions and + result in unreliable and difficult-to-debug code. +- If making the outgoing RPC in order to handle an incoming RPC, be sure to + translate the status code before returning the error from your method handler. 
+ For example, if the error is an `INVALID_ARGUMENT` error, that probably means + your service has a bug (otherwise it shouldn't have triggered this error), in + which case `INTERNAL` is more appropriate to return back to your users. + +### Example: Handling errors from an RPC + +The following code snippet demonstrates how to handle errors from an RPC in +gRPC: + +```go +ctx, cancel := context.WithTimeout(context.Background(), time.Second) +defer cancel() + +res, err := client.MyRPC(ctx, &MyRequest{}) +if err != nil { + // Handle the error appropriately, + // log it & return an error to the caller, etc. + log.Printf("Error calling MyRPC: %v", err) + return nil, err +} + +// Use the response as appropriate +log.Printf("MyRPC response: %v", res) +``` + +To determine the type of error that occurred, you can use the status field of +the error response: + + +```go +resp, err := client.MakeRPC(context.Background(), request) +if err != nil { + status, ok := status.FromError(err) + if ok { + // Handle the error based on its status code + if status.Code() == codes.NotFound { + log.Println("Requested resource not found") + } else { + log.Printf("RPC error: %v", status.Message()) + } + } else { + //Handle non-RPC errors + log.Printf("Non-RPC error: %v", err) + } + return +} + +// Use the response as needed +log.Printf("Response received: %v", resp) +``` + +### Example: Using a backoff strategy + + +When retrying failed RPCs, use a backoff strategy to avoid overwhelming the +server or exacerbating network issues: + + +```go +var res *MyResponse +var err error + +// If the user doesn't have a context with a deadline, create one +ctx, cancel := context.WithTimeout(context.Background(), time.Second) +defer cancel() + +// Retry the RPC call a maximum number of times +for i := 0; i < maxRetries; i++ { + + // Make the RPC call + res, err = client.MyRPC(ctx, &MyRequest{}) + + // Check if the RPC call was successful + if err == nil { + // The RPC was successful, so break out of the 
loop + break + } + + // The RPC failed, so wait for a backoff period before retrying + backoff := time.Duration(i) * time.Second + log.Printf("Error calling MyRPC: %v; retrying in %v", err, backoff) + time.Sleep(backoff) +} + +// Check if the RPC call was successful after all retries +if err != nil { + // All retries failed, so handle the error appropriately + log.Printf("Error calling MyRPC: %v", err) + return nil, err +} + +// Use the response as appropriate +log.Printf("MyRPC response: %v", res) +``` + + +## Conclusion + +The +[`FailOnNonTempDialError`](https://pkg.go.dev/google.golang.org/grpc#FailOnNonTempDialError), +[`WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), and +[`WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError) +options are designed to handle errors at dial time, but they can introduce race +conditions and result in unreliable and difficult-to-debug code. Instead of +relying on these options, we strongly encourage developers to rely on errors +from RPCs for error handling. By following best practices for error handling in +gRPC, developers can write more reliable and robust gRPC applications. diff --git a/Documentation/grpc-auth-support.md b/Documentation/grpc-auth-support.md index 0a6b9f52c1cd..1362eeaa4ae2 100644 --- a/Documentation/grpc-auth-support.md +++ b/Documentation/grpc-auth-support.md @@ -53,7 +53,7 @@ Alternatively, a client may also use the `grpc.CallOption` on each invocation of an RPC. To create a `credentials.PerRPCCredentials`, use -[oauth.NewOauthAccess](https://godoc.org/google.golang.org/grpc/credentials/oauth#NewOauthAccess). +[oauth.TokenSource](https://godoc.org/google.golang.org/grpc/credentials/oauth#TokenSource). Note, the OAuth2 implementation of `grpc.PerRPCCredentials` requires a client to use [grpc.WithTransportCredentials](https://godoc.org/google.golang.org/grpc#WithTransportCredentials) to prevent any insecure transmission of tokens. 
diff --git a/Documentation/grpc-metadata.md b/Documentation/grpc-metadata.md index ff4de6e71de3..06b36f4ac171 100644 --- a/Documentation/grpc-metadata.md +++ b/Documentation/grpc-metadata.md @@ -223,3 +223,8 @@ func (s *server) SomeStreamingRPC(stream pb.Service_SomeStreamingRPCServer) erro stream.SetTrailer(trailer) } ``` + +## Updating metadata from a server interceptor + +An example for updating metadata from a server interceptor is +available [here](../examples/features/metadata_interceptor/server/main.go). diff --git a/Documentation/proxy.md b/Documentation/proxy.md index 8fd6ee5248a8..189cdfbcb09b 100644 --- a/Documentation/proxy.md +++ b/Documentation/proxy.md @@ -1,8 +1,9 @@ # Proxy HTTP CONNECT proxies are supported by default in gRPC. The proxy address can be -specified by the environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or -the lowercase versions thereof). +specified by the environment variables `HTTPS_PROXY` and `NO_PROXY`. Before Go +1.16, if the `HTTPS_PROXY` environment variable is unset, `HTTP_PROXY` will be +used instead. (Note that these environment variables are case insensitive.) ## Custom proxy @@ -12,4 +13,4 @@ connection before giving it to gRPC. If the default proxy doesn't work for you, replace the default dialer with your custom proxy dialer. This can be done using -[`WithDialer`](https://godoc.org/google.golang.org/grpc#WithDialer). \ No newline at end of file +[`WithDialer`](https://godoc.org/google.golang.org/grpc#WithDialer). diff --git a/Documentation/server-reflection-tutorial.md b/Documentation/server-reflection-tutorial.md index b1781fa68dc9..6c7dc6cd6a5f 100644 --- a/Documentation/server-reflection-tutorial.md +++ b/Documentation/server-reflection-tutorial.md @@ -2,8 +2,9 @@ gRPC Server Reflection provides information about publicly-accessible gRPC services on a server, and assists clients at runtime to construct RPC requests -and responses without precompiled service information. 
It is used by gRPC CLI, -which can be used to introspect server protos and send/receive test RPCs. +and responses without precompiled service information. It is used by +[gRPCurl](https://github.com/fullstorydev/grpcurl), which can be used to +introspect server protos and send/receive test RPCs. ## Enable Server Reflection @@ -39,36 +40,41 @@ make the following changes: An example server with reflection registered can be found at `examples/features/reflection/server`. -## gRPC CLI +## gRPCurl -After enabling Server Reflection in a server application, you can use gRPC CLI -to check its services. gRPC CLI is only available in c++. Instructions on how to -build and use gRPC CLI can be found at -[command_line_tool.md](https://github.com/grpc/grpc/blob/master/doc/command_line_tool.md). +After enabling Server Reflection in a server application, you can use gRPCurl +to check its services. gRPCurl is built with Go and has packages available. +Instructions on how to install and use gRPCurl can be found at +[gRPCurl Installation](https://github.com/fullstorydev/grpcurl#installation). -## Use gRPC CLI to check services +## Use gRPCurl to check services First, start the helloworld server in grpc-go directory: ```sh -$ cd -$ go run examples/features/reflection/server/main.go +$ cd /examples +$ go run features/reflection/server/main.go ``` -Open a new terminal and make sure you are in the directory where grpc_cli lives: - +output: ```sh -$ cd /bins/opt +server listening at [::]:50051 ``` -### List services +After installing gRPCurl, open a new terminal and run the commands from the new +terminal. + +**NOTE:** gRPCurl expects a TLS-encrypted connection by default. For all of +the commands below, use the `-plaintext` flag to use an unencrypted connection. 
-`grpc_cli ls` command lists services and methods exposed at a given port: +### List services and methods + +The `list` command lists services exposed at a given port: - List all the services exposed at a given port ```sh - $ ./grpc_cli ls localhost:50051 + $ grpcurl -plaintext localhost:50051 list ``` output: @@ -78,72 +84,88 @@ $ cd /bins/opt helloworld.Greeter ``` -- List one service with details +- List all the methods of a service - `grpc_cli ls` command inspects a service given its full name (in the format of - \.\). It can print information with a long listing format - when `-l` flag is set. This flag can be used to get more details about a - service. + The `list` command lists methods given the full service name (in the format of + \.\). ```sh - $ ./grpc_cli ls localhost:50051 helloworld.Greeter -l + $ grpcurl -plaintext localhost:50051 list helloworld.Greeter ``` output: ```sh - filename: helloworld.proto - package: helloworld; - service Greeter { - rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} - } + helloworld.Greeter.SayHello + ``` + +### Describe services and methods + +- Describe all services + The `describe` command inspects a service given its full name (in the format + of \.\). + + ```sh + $ grpcurl -plaintext localhost:50051 describe helloworld.Greeter ``` -### List methods + output: + ```sh + helloworld.Greeter is a service: + service Greeter { + rpc SayHello ( .helloworld.HelloRequest ) returns ( .helloworld.HelloReply ); + } + ``` -- List one method with details +- Describe all methods of a service - `grpc_cli ls` command also inspects a method given its full name (in the - format of \.\.\). + The `describe` command inspects a method given its full name (in the format of + \.\.\). 
```sh - $ ./grpc_cli ls localhost:50051 helloworld.Greeter.SayHello -l + $ grpcurl -plaintext localhost:50051 describe helloworld.Greeter.SayHello ``` output: ```sh - rpc SayHello(helloworld.HelloRequest) returns (helloworld.HelloReply) {} + helloworld.Greeter.SayHello is a method: + rpc SayHello ( .helloworld.HelloRequest ) returns ( .helloworld.HelloReply ); ``` ### Inspect message types -We can use`grpc_cli type` command to inspect request/response types given the +We can use the `describe` command to inspect request/response types given the full name of the type (in the format of \.\). - Get information about the request type ```sh - $ ./grpc_cli type localhost:50051 helloworld.HelloRequest + $ grpcurl -plaintext localhost:50051 describe helloworld.HelloRequest ``` output: ```sh + helloworld.HelloRequest is a message: message HelloRequest { - optional string name = 1[json_name = "name"]; + string name = 1; } ``` ### Call a remote method -We can send RPCs to a server and get responses using `grpc_cli call` command. +We can send RPCs to a server and get responses using the full method name (in +the format of \.\.\). The `-d ` flag +represents the request data and the `-format text` flag indicates that the +request data is in text format. - Call a unary method ```sh - $ ./grpc_cli call localhost:50051 SayHello "name: 'gRPC CLI'" + $ grpcurl -plaintext -format text -d 'name: "gRPCurl"' \ + localhost:50051 helloworld.Greeter.SayHello ``` output: ```sh - message: "Hello gRPC CLI" + message: "Hello gRPCurl" ``` diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 093c82b3afe8..c6672c0a3efe 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -8,17 +8,18 @@ See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIB for general contribution guidelines. 
## Maintainers (in alphabetical order) -- [canguler](https://github.com/canguler), Google LLC + - [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC - [menghanl](https://github.com/menghanl), Google LLC - [srini100](https://github.com/srini100), Google LLC ## Emeritus Maintainers (in alphabetical order) - [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC - [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC - [jtattermusch](https://github.com/jtattermusch), Google LLC - [lyuxuan](https://github.com/lyuxuan), Google LLC - [makmukhi](https://github.com/makmukhi), Google LLC diff --git a/Makefile b/Makefile index 1f0722f16243..1f8960922b3b 100644 --- a/Makefile +++ b/Makefile @@ -41,8 +41,6 @@ vetdeps: clean \ proto \ test \ - testappengine \ - testappenginedeps \ testrace \ vet \ vetdeps diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 000000000000..530197749e9d --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/README.md b/README.md index 3949a683fb58..0e6ae69a5846 100644 --- a/README.md +++ b/README.md @@ -136,6 +136,6 @@ errors. [Go module]: https://github.com/golang/go/wiki/Modules [gRPC]: https://grpc.io [Go gRPC docs]: https://grpc.io/docs/languages/go -[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696 +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 [quick start]: https://grpc.io/docs/languages/go/quickstart [go-releases]: https://golang.org/doc/devel/release.html diff --git a/admin/admin.go b/admin/admin.go index 5212250b7d4e..41ae156b4d15 100644 --- a/admin/admin.go +++ b/admin/admin.go @@ -20,9 +20,10 @@ // administration services to a gRPC server. The services registered are: // // - Channelz: https://github.com/grpc/proposal/blob/master/A14-channelz.md +// // - CSDS: https://github.com/grpc/proposal/blob/master/A40-csds-support.md // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. 
diff --git a/admin/test/utils.go b/admin/test/utils.go index 1add8afa824c..086ba2e6e476 100644 --- a/admin/test/utils.go +++ b/admin/test/utils.go @@ -26,15 +26,18 @@ import ( "testing" "time" - v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" - v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" "github.com/google/uuid" "google.golang.org/grpc" "google.golang.org/grpc/admin" - channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" "google.golang.org/grpc/status" + + v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + channelzgrpc "google.golang.org/grpc/channelz/grpc_channelz_v1" + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" ) const ( @@ -52,8 +55,7 @@ type ExpectedStatusCodes struct { // codes. func RunRegisterTests(t *testing.T, ec ExpectedStatusCodes) { nodeID := uuid.New().String() - bootstrapCleanup, err := xds.SetupBootstrapFile(xds.BootstrapOptions{ - Version: xds.TransportV3, + bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ NodeID: nodeID, ServerURI: "no.need.for.a.server", }) @@ -78,7 +80,7 @@ func RunRegisterTests(t *testing.T, ec ExpectedStatusCodes) { server.Serve(lis) }() - conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("cannot connect to server: %v", err) } @@ -97,7 +99,7 @@ func RunRegisterTests(t *testing.T, ec ExpectedStatusCodes) { // RunChannelz makes a channelz RPC. 
func RunChannelz(conn *grpc.ClientConn) error { - c := channelzpb.NewChannelzClient(conn) + c := channelzgrpc.NewChannelzClient(conn) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() _, err := c.GetTopChannels(ctx, &channelzpb.GetTopChannelsRequest{}, grpc.WaitForReady(true)) diff --git a/attributes/attributes.go b/attributes/attributes.go index 3220d87be403..49712aca33ae 100644 --- a/attributes/attributes.go +++ b/attributes/attributes.go @@ -19,61 +19,124 @@ // Package attributes defines a generic key/value store used in various gRPC // components. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. package attributes -import "fmt" +import ( + "fmt" + "strings" +) // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own -// types for keys. +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. If values implement 'Equal(o +// interface{}) bool', it will be called by (*Attributes).Equal to determine +// whether two values with the same key should be considered equal. type Attributes struct { m map[interface{}]interface{} } -// New returns a new Attributes containing all key/value pairs in kvs. If the -// same key appears multiple times, the last value overwrites all previous -// values for that key. Panics if len(kvs) is not even. -func New(kvs ...interface{}) *Attributes { - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) - } - a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} - for i := 0; i < len(kvs)/2; i++ { - a.m[kvs[i*2]] = kvs[i*2+1] - } - return a +// New returns a new Attributes containing the key/value pair. 
+func New(key, value interface{}) *Attributes { + return &Attributes{m: map[interface{}]interface{}{key: value}} } -// WithValues returns a new Attributes containing all key/value pairs in a and -// kvs. Panics if len(kvs) is not even. If the same key appears multiple -// times, the last value overwrites all previous values for that key. To -// remove an existing key, use a nil value. -func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value interface{}) *Attributes { if a == nil { - return New(kvs...) - } - if len(kvs)%2 != 0 { - panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } - for i := 0; i < len(kvs)/2; i++ { - n.m[kvs[i*2]] = kvs[i*2+1] - } + n.m[key] = value return n } // Value returns the value associated with these attributes for key, or nil if -// no value is associated with key. +// no value is associated with key. The returned value should not be modified. func (a *Attributes) Value(key interface{}) interface{} { if a == nil { return nil } return a.m[key] } + +// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) +// bool' is implemented for a value in the attributes, it is called to +// determine if the value matches the one stored in the other attributes. If +// Equal is not implemented, standard equality is used to determine if the two +// values are equal. Note that some types (e.g. 
maps) aren't comparable by +default, so they must be wrapped in a struct, or in an alias type, with Equal +defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. + return false + } + } + return true +} + +// String prints the attribute map. If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. +func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + first = false + } + sb.WriteString("}") + return sb.String() +} + +func str(x interface{}) string { + if v, ok := x.(fmt.Stringer); ok { + return v.String() + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// It is impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. 
+func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/attributes/attributes_test.go b/attributes/attributes_test.go index 1174e2371a5f..02d5b24f3df1 100644 --- a/attributes/attributes_test.go +++ b/attributes/attributes_test.go @@ -20,41 +20,71 @@ package attributes_test import ( "fmt" - "reflect" "testing" "google.golang.org/grpc/attributes" ) +type stringVal struct { + s string +} + +func (s stringVal) Equal(o interface{}) bool { + os, ok := o.(stringVal) + return ok && s.s == os.s +} + func ExampleAttributes() { type keyOne struct{} type keyTwo struct{} - a := attributes.New(keyOne{}, 1, keyTwo{}, "two") + a := attributes.New(keyOne{}, 1).WithValue(keyTwo{}, stringVal{s: "two"}) fmt.Println("Key one:", a.Value(keyOne{})) fmt.Println("Key two:", a.Value(keyTwo{})) // Output: // Key one: 1 - // Key two: two + // Key two: {two} } -func ExampleAttributes_WithValues() { +func ExampleAttributes_WithValue() { type keyOne struct{} type keyTwo struct{} a := attributes.New(keyOne{}, 1) - a = a.WithValues(keyTwo{}, "two") + a = a.WithValue(keyTwo{}, stringVal{s: "two"}) fmt.Println("Key one:", a.Value(keyOne{})) fmt.Println("Key two:", a.Value(keyTwo{})) // Output: // Key one: 1 - // Key two: two + // Key two: {two} } -// Test that two attributes with the same content are `reflect.DeepEqual`. -func TestDeepEqual(t *testing.T) { +// Test that two attributes with the same content are Equal. 
+func TestEqual(t *testing.T) { type keyOne struct{} - a1 := attributes.New(keyOne{}, 1) - a2 := attributes.New(keyOne{}, 1) - if !reflect.DeepEqual(a1, a2) { - t.Fatalf("reflect.DeepEqual(%+v, %+v), want true, got false", a1, a2) + type keyTwo struct{} + a1 := attributes.New(keyOne{}, 1).WithValue(keyTwo{}, stringVal{s: "two"}) + a2 := attributes.New(keyOne{}, 1).WithValue(keyTwo{}, stringVal{s: "two"}) + if !a1.Equal(a2) { + t.Fatalf("%+v.Equals(%+v) = false; want true", a1, a2) + } + if !a2.Equal(a1) { + t.Fatalf("%+v.Equals(%+v) = false; want true", a2, a1) + } +} + +// Test that two attributes with different content are not Equal. +func TestNotEqual(t *testing.T) { + type keyOne struct{} + type keyTwo struct{} + a1 := attributes.New(keyOne{}, 1).WithValue(keyTwo{}, stringVal{s: "two"}) + a2 := attributes.New(keyOne{}, 2).WithValue(keyTwo{}, stringVal{s: "two"}) + a3 := attributes.New(keyOne{}, 1).WithValue(keyTwo{}, stringVal{s: "one"}) + if a1.Equal(a2) { + t.Fatalf("%+v.Equals(%+v) = true; want false", a1, a2) + } + if a2.Equal(a1) { + t.Fatalf("%+v.Equals(%+v) = true; want false", a2, a1) + } + if a3.Equal(a1) { + t.Fatalf("%+v.Equals(%+v) = true; want false", a3, a1) } } diff --git a/authz/audit/audit_logger.go b/authz/audit/audit_logger.go new file mode 100644 index 000000000000..b9b721970387 --- /dev/null +++ b/authz/audit/audit_logger.go @@ -0,0 +1,127 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package audit contains interfaces for audit logging during authorization. +package audit + +import ( + "encoding/json" + "sync" +) + +// loggerBuilderRegistry holds a map of audit logger builders and a mutex +// to facilitate thread-safe reading/writing operations. +type loggerBuilderRegistry struct { + mu sync.Mutex + builders map[string]LoggerBuilder +} + +var ( + registry = loggerBuilderRegistry{ + builders: make(map[string]LoggerBuilder), + } +) + +// RegisterLoggerBuilder registers the builder in a global map +// using b.Name() as the key. +// +// This should only be called during initialization time (i.e. in an init() +// function). If multiple builders are registered with the same name, +// the one registered last will take effect. +func RegisterLoggerBuilder(b LoggerBuilder) { + registry.mu.Lock() + defer registry.mu.Unlock() + registry.builders[b.Name()] = b +} + +// GetLoggerBuilder returns a builder with the given name. +// It returns nil if the builder is not found in the registry. +func GetLoggerBuilder(name string) LoggerBuilder { + registry.mu.Lock() + defer registry.mu.Unlock() + return registry.builders[name] +} + +// Event contains information passed to the audit logger as part of an +// audit logging event. +type Event struct { + // FullMethodName is the full method name of the audited RPC, in the format + // of "/pkg.Service/Method". For example, "/helloworld.Greeter/SayHello". + FullMethodName string + // Principal is the identity of the caller. Currently it will only be + // available in certificate-based TLS authentication. + Principal string + // PolicyName is the authorization policy name or the xDS RBAC filter name. + PolicyName string + // MatchedRule is the matched rule or policy name in the xDS RBAC filter. + // It will be empty if there is no match. + MatchedRule string + // Authorized indicates whether the audited RPC is authorized or not. 
+ Authorized bool +} + +// LoggerConfig represents an opaque data structure holding an audit +// logger configuration. Concrete types representing configuration of specific +// audit loggers must embed this interface to implement it. +type LoggerConfig interface { + loggerConfig() +} + +// Logger is the interface to be implemented by audit loggers. +// +// An audit logger is a logger instance that can be configured via the +// authorization policy API or xDS HTTP RBAC filters. When the authorization +// decision meets the condition for audit, all the configured audit loggers' +// Log() method will be invoked to log that event. +// +// TODO(lwge): Change the link to the merged gRFC once it's ready. +// Please refer to https://github.com/grpc/proposal/pull/346 for more details +// about audit logging. +type Logger interface { + // Log performs audit logging for the provided audit event. + // + // This method is invoked in the RPC path and therefore implementations + // must not block. + Log(*Event) +} + +// LoggerBuilder is the interface to be implemented by audit logger +// builders that are used at runtime to configure and instantiate audit loggers. +// +// Users who want to implement their own audit logging logic should +// implement this interface, along with the Logger interface, and register +// it by calling RegisterLoggerBuilder() at init time. +// +// TODO(lwge): Change the link to the merged gRFC once it's ready. +// Please refer to https://github.com/grpc/proposal/pull/346 for more details +// about audit logging. +type LoggerBuilder interface { + // ParseLoggerConfig parses the given JSON bytes into a structured + // logger config this builder can use to build an audit logger. + ParseLoggerConfig(config json.RawMessage) (LoggerConfig, error) + // Build builds an audit logger with the given logger config. 
+ // This will only be called with valid configs returned from + // ParseLoggerConfig() and any runtime issues such as failing to + // create a file should be handled by the logger implementation instead of + // failing the logger instantiation. So implementers need to make sure it + // can return a logger without error at this stage. + Build(LoggerConfig) Logger + // Name returns the name of logger built by this builder. + // This is used to register and pick the builder. + Name() string +} diff --git a/authz/audit/audit_logging_test.go b/authz/audit/audit_logging_test.go new file mode 100644 index 000000000000..e3a4ef25b021 --- /dev/null +++ b/authz/audit/audit_logging_test.go @@ -0,0 +1,377 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package audit_test + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "io" + "net" + "os" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/authz" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/status" + "google.golang.org/grpc/testdata" + + _ "google.golang.org/grpc/authz/audit/stdout" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +type statAuditLogger struct { + authzDecisionStat map[bool]int // Map to hold the counts of authorization decisions + lastEvent *audit.Event // Field to store last received event +} + +func (s *statAuditLogger) Log(event *audit.Event) { + s.authzDecisionStat[event.Authorized]++ + *s.lastEvent = *event +} + +type loggerBuilder struct { + authzDecisionStat map[bool]int + lastEvent *audit.Event +} + +func (loggerBuilder) Name() string { + return "stat_logger" +} + +func (lb *loggerBuilder) Build(audit.LoggerConfig) audit.Logger { + return &statAuditLogger{ + authzDecisionStat: lb.authzDecisionStat, + lastEvent: lb.lastEvent, + } +} + +func (*loggerBuilder) ParseLoggerConfig(config json.RawMessage) (audit.LoggerConfig, error) { + return nil, nil +} + +// TestAuditLogger examines audit logging invocations using four different +// authorization policies. It covers scenarios including a disabled audit, +// auditing both 'allow' and 'deny' outcomes, and separately auditing 'allow' +// and 'deny' outcomes. Additionally, it checks if SPIFFE ID from a certificate +// is propagated correctly. 
+func (s) TestAuditLogger(t *testing.T) { + // Each test data entry contains an authz policy for a grpc server, + // how many 'allow' and 'deny' outcomes we expect (each test case makes 2 + // unary calls and one client-streaming call), and a structure to check if + // the audit.Event fields are properly populated. Additionally, we specify + // directly which authz outcome we expect from each type of call. + tests := []struct { + name string + authzPolicy string + wantAuthzOutcomes map[bool]int + eventContent *audit.Event + wantUnaryCallCode codes.Code + wantStreamingCallCode codes.Code + }{ + { + name: "No audit", + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_UnaryCall", + "request": { + "paths": [ + "/grpc.testing.TestService/UnaryCall" + ] + } + } + ], + "audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "stat_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantAuthzOutcomes: map[bool]int{true: 0, false: 0}, + wantUnaryCallCode: codes.OK, + wantStreamingCallCode: codes.PermissionDenied, + }, + { + name: "Allow All Deny Streaming - Audit All", + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_all", + "request": { + "paths": [ + "*" + ] + } + } + ], + "deny_rules": [ + { + "name": "deny_all", + "request": { + "paths": [ + "/grpc.testing.TestService/StreamingInputCall" + ] + } + } + ], + "audit_logging_options": { + "audit_condition": "ON_DENY_AND_ALLOW", + "audit_loggers": [ + { + "name": "stat_logger", + "config": {}, + "is_optional": false + }, + { + "name": "stdout_logger", + "is_optional": false + } + ] + } + }`, + wantAuthzOutcomes: map[bool]int{true: 2, false: 1}, + eventContent: &audit.Event{ + FullMethodName: "/grpc.testing.TestService/StreamingInputCall", + Principal: "spiffe://foo.bar.com/client/workload/1", + PolicyName: "authz", + MatchedRule: "authz_deny_all", + Authorized: false, + }, + wantUnaryCallCode: codes.OK, + 
wantStreamingCallCode: codes.PermissionDenied, + }, + { + name: "Allow Unary - Audit Allow", + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_UnaryCall", + "request": { + "paths": [ + "/grpc.testing.TestService/UnaryCall" + ] + } + } + ], + "audit_logging_options": { + "audit_condition": "ON_ALLOW", + "audit_loggers": [ + { + "name": "stat_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantAuthzOutcomes: map[bool]int{true: 2, false: 0}, + wantUnaryCallCode: codes.OK, + wantStreamingCallCode: codes.PermissionDenied, + }, + { + name: "Allow Typo - Audit Deny", + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_UnaryCall", + "request": { + "paths": [ + "/grpc.testing.TestService/UnaryCall_Z" + ] + } + } + ], + "audit_logging_options": { + "audit_condition": "ON_DENY", + "audit_loggers": [ + { + "name": "stat_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantAuthzOutcomes: map[bool]int{true: 0, false: 3}, + wantUnaryCallCode: codes.PermissionDenied, + wantStreamingCallCode: codes.PermissionDenied, + }, + } + // Construct the credentials for the tests and the stub server + serverCreds := loadServerCreds(t) + clientCreds := loadClientCreds(t) + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Setup test statAuditLogger, gRPC test server with authzPolicy, unary + // and stream interceptors. 
+ lb := &loggerBuilder{ + authzDecisionStat: map[bool]int{true: 0, false: 0}, + lastEvent: &audit.Event{}, + } + audit.RegisterLoggerBuilder(lb) + i, _ := authz.NewStatic(test.authzPolicy) + + s := grpc.NewServer( + grpc.Creds(serverCreds), + grpc.ChainUnaryInterceptor(i.UnaryInterceptor), + grpc.ChainStreamInterceptor(i.StreamInterceptor)) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, ss) + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Error listening: %v", err) + } + go s.Serve(lis) + + // Setup gRPC test client with certificates containing a SPIFFE Id. + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(clientCreds)) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := testgrpc.NewTestServiceClient(clientConn) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != test.wantUnaryCallCode { + t.Errorf("Unexpected UnaryCall fail: got %v want %v", err, test.wantUnaryCallCode) + } + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != test.wantUnaryCallCode { + t.Errorf("Unexpected UnaryCall fail: got %v want %v", err, test.wantUnaryCallCode) + } + stream, err := client.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("StreamingInputCall failed:%v", err) + } + req := &testpb.StreamingInputCallRequest{ + Payload: &testpb.Payload{ + Body: []byte("hi"), + }, + } + if err := stream.Send(req); err != nil && err != io.EOF { + t.Fatalf("stream.Send failed:%v", err) + } + if _, err := stream.CloseAndRecv(); status.Code(err) != test.wantStreamingCallCode { + t.Errorf("Unexpected stream.CloseAndRecv fail: got %v want %v", err, test.wantStreamingCallCode) + } + + // Compare expected number of allows/denies with content of the internal + // map of statAuditLogger. 
+ if diff := cmp.Diff(lb.authzDecisionStat, test.wantAuthzOutcomes); diff != "" { + t.Errorf("Authorization decisions do not match\ndiff (-got +want):\n%s", diff) + } + // Compare last event received by statAuditLogger with expected event. + if test.eventContent != nil { + if diff := cmp.Diff(lb.lastEvent, test.eventContent); diff != "" { + t.Errorf("Unexpected message\ndiff (-got +want):\n%s", diff) + } + } + }) + } +} + +// loadServerCreds constructs TLS containing server certs and CA +func loadServerCreds(t *testing.T) credentials.TransportCredentials { + t.Helper() + cert := loadKeys(t, "x509/server1_cert.pem", "x509/server1_key.pem") + certPool := loadCACerts(t, "x509/client_ca_cert.pem") + return credentials.NewTLS(&tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + Certificates: []tls.Certificate{cert}, + ClientCAs: certPool, + }) +} + +// loadClientCreds constructs TLS containing client certs and CA +func loadClientCreds(t *testing.T) credentials.TransportCredentials { + t.Helper() + cert := loadKeys(t, "x509/client_with_spiffe_cert.pem", "x509/client_with_spiffe_key.pem") + roots := loadCACerts(t, "x509/server_ca_cert.pem") + return credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + ServerName: "x.test.example.com", + }) + +} + +// loadKeys loads X509 key pair from the provided file paths. 
+// It is used for loading both client and server certificates for the test +func loadKeys(t *testing.T, certPath, key string) tls.Certificate { + t.Helper() + cert, err := tls.LoadX509KeyPair(testdata.Path(certPath), testdata.Path(key)) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(%q, %q) failed: %v", certPath, key, err) + } + return cert +} + +// loadCACerts loads CA certificates and constructs x509.CertPool +// It is used for loading both client and server CAs for the test +func loadCACerts(t *testing.T, certPath string) *x509.CertPool { + t.Helper() + ca, err := os.ReadFile(testdata.Path(certPath)) + if err != nil { + t.Fatalf("os.ReadFile(%q) failed: %v", certPath, err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(ca) { + t.Fatal("Failed to append certificates") + } + return roots +} diff --git a/authz/audit/stdout/stdout_logger.go b/authz/audit/stdout/stdout_logger.go new file mode 100644 index 000000000000..c4ba21fa4682 --- /dev/null +++ b/authz/audit/stdout/stdout_logger.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stdout defines an stdout audit logger. 
+package stdout + +import ( + "encoding/json" + "log" + "os" + "time" + + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/grpclog" +) + +var grpcLogger = grpclog.Component("authz-audit") + +// Name is the string to identify this logger type in the registry +const Name = "stdout_logger" + +func init() { + audit.RegisterLoggerBuilder(&loggerBuilder{ + goLogger: log.New(os.Stdout, "", 0), + }) +} + +type event struct { + FullMethodName string `json:"rpc_method"` + Principal string `json:"principal"` + PolicyName string `json:"policy_name"` + MatchedRule string `json:"matched_rule"` + Authorized bool `json:"authorized"` + Timestamp string `json:"timestamp"` // Time when the audit event is logged via Log method +} + +// logger implements the audit.Logger interface by logging to standard output. +type logger struct { + goLogger *log.Logger +} + +// Log marshals the audit.Event to json and prints it to standard output. +func (l *logger) Log(event *audit.Event) { + jsonContainer := map[string]interface{}{ + "grpc_audit_log": convertEvent(event), + } + jsonBytes, err := json.Marshal(jsonContainer) + if err != nil { + grpcLogger.Errorf("failed to marshal AuditEvent data to JSON: %v", err) + return + } + l.goLogger.Println(string(jsonBytes)) +} + +// loggerConfig represents the configuration for the stdout logger. +// It is currently empty and implements the audit.LoggerConfig interface by embedding it. +type loggerConfig struct { + audit.LoggerConfig +} + +type loggerBuilder struct { + goLogger *log.Logger +} + +func (loggerBuilder) Name() string { + return Name +} + +// Build returns a new instance of the stdout logger. +// Passed in configuration is ignored as the stdout logger does not +// expect any configuration to be provided. +func (lb *loggerBuilder) Build(audit.LoggerConfig) audit.Logger { + return &logger{ + goLogger: lb.goLogger, + } +} + +// ParseLoggerConfig is a no-op since the stdout logger does not accept any configuration. 
+func (*loggerBuilder) ParseLoggerConfig(config json.RawMessage) (audit.LoggerConfig, error) { + if len(config) != 0 && string(config) != "{}" { + grpcLogger.Warningf("Stdout logger doesn't support custom configs. Ignoring:\n%s", string(config)) + } + return &loggerConfig{}, nil +} + +func convertEvent(auditEvent *audit.Event) *event { + return &event{ + FullMethodName: auditEvent.FullMethodName, + Principal: auditEvent.Principal, + PolicyName: auditEvent.PolicyName, + MatchedRule: auditEvent.MatchedRule, + Authorized: auditEvent.Authorized, + Timestamp: time.Now().Format(time.RFC3339Nano), + } +} diff --git a/authz/audit/stdout/stdout_logger_test.go b/authz/audit/stdout/stdout_logger_test.go new file mode 100644 index 000000000000..a389b942e2c7 --- /dev/null +++ b/authz/audit/stdout/stdout_logger_test.go @@ -0,0 +1,140 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package stdout + +import ( + "bytes" + "encoding/json" + "log" + "os" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/internal/grpctest" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestStdoutLogger_Log(t *testing.T) { + tests := map[string]struct { + event *audit.Event + wantMessage string + wantErr string + }{ + "few fields": { + event: &audit.Event{PolicyName: "test policy", Principal: "test principal"}, + wantMessage: `{"fullMethodName":"","principal":"test principal","policyName":"test policy","matchedRule":"","authorized":false`, + }, + "all fields": { + event: &audit.Event{ + FullMethodName: "/helloworld.Greeter/SayHello", + Principal: "spiffe://example.org/ns/default/sa/default/backend", + PolicyName: "example-policy", + MatchedRule: "dev-access", + Authorized: true, + }, + wantMessage: `{"fullMethodName":"/helloworld.Greeter/SayHello",` + + `"principal":"spiffe://example.org/ns/default/sa/default/backend","policyName":"example-policy",` + + `"matchedRule":"dev-access","authorized":true`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + before := time.Now().Unix() + var buf bytes.Buffer + builder := &loggerBuilder{goLogger: log.New(&buf, "", 0)} + auditLogger := builder.Build(nil) + + auditLogger.Log(test.event) + + var container map[string]interface{} + if err := json.Unmarshal(buf.Bytes(), &container); err != nil { + t.Fatalf("Failed to unmarshal audit log event: %v", err) + } + innerEvent := extractEvent(container["grpc_audit_log"].(map[string]interface{})) + if innerEvent.Timestamp == "" { + t.Fatalf("Resulted event has no timestamp: %v", innerEvent) + } + after := time.Now().Unix() + innerEventUnixTime, err := time.Parse(time.RFC3339Nano, innerEvent.Timestamp) + if err != nil { + t.Fatalf("Failed to convert event timestamp into Unix time format: %v", 
err) + } + if before > innerEventUnixTime.Unix() || after < innerEventUnixTime.Unix() { + t.Errorf("The audit event timestamp is outside of the test interval: test start %v, event timestamp %v, test end %v", before, innerEventUnixTime.Unix(), after) + } + if diff := cmp.Diff(trimEvent(innerEvent), test.event); diff != "" { + t.Fatalf("Unexpected message\ndiff (-got +want):\n%s", diff) + } + }) + } +} + +func (s) TestStdoutLoggerBuilder_NilConfig(t *testing.T) { + builder := &loggerBuilder{ + goLogger: log.New(os.Stdout, "", log.LstdFlags), + } + config, err := builder.ParseLoggerConfig(nil) + if err != nil { + t.Fatalf("Failed to parse stdout logger configuration: %v", err) + } + if l := builder.Build(config); l == nil { + t.Fatal("Failed to build stdout audit logger") + } +} + +func (s) TestStdoutLoggerBuilder_Registration(t *testing.T) { + if audit.GetLoggerBuilder("stdout_logger") == nil { + t.Fatal("stdout logger is not registered") + } +} + +// extractEvent extracts an stdout.event from a map +// unmarshalled from a logged json message. +func extractEvent(container map[string]interface{}) event { + return event{ + FullMethodName: container["rpc_method"].(string), + Principal: container["principal"].(string), + PolicyName: container["policy_name"].(string), + MatchedRule: container["matched_rule"].(string), + Authorized: container["authorized"].(bool), + Timestamp: container["timestamp"].(string), + } +} + +// trimEvent converts a logged stdout.event into an audit.Event +// by removing Timestamp field. It is used for comparing events during testing. 
+func trimEvent(testEvent event) *audit.Event { + return &audit.Event{ + FullMethodName: testEvent.FullMethodName, + Principal: testEvent.Principal, + PolicyName: testEvent.PolicyName, + MatchedRule: testEvent.MatchedRule, + Authorized: testEvent.Authorized, + } +} diff --git a/authz/grpc_authz_end2end_test.go b/authz/grpc_authz_end2end_test.go new file mode 100644 index 000000000000..0a4cd1862e98 --- /dev/null +++ b/authz/grpc_authz_end2end_test.go @@ -0,0 +1,727 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package authz_test + +import ( + "context" + "crypto/tls" + "crypto/x509" + "io" + "net" + "os" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/authz" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/testdata" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +type testServer struct { + testgrpc.UnimplementedTestServiceServer +} + +func (s *testServer) UnaryCall(ctx context.Context, req *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil +} + +func (s *testServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error { + for { + _, err := stream.Recv() + if err == io.EOF { + return stream.SendAndClose(&testpb.StreamingInputCallResponse{}) + } + if err != nil { + return err + } + } +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +var authzTests = map[string]struct { + authzPolicy string + md metadata.MD + wantStatus *status.Status +}{ + "DeniesRPCMatchInDenyNoMatchInAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_StreamingOutputCall", + "request": { + "paths": + [ + "/grpc.testing.TestService/StreamingOutputCall" + ] + } + } + ], + "deny_rules": + [ + { + "name": "deny_TestServiceCalls", + "request": { + "paths": + [ + "/grpc.testing.TestService/*" + ], + "headers": + [ + { + "key": "key-abc", + "values": + [ + "val-abc", + "val-def" + ] + } + ] + } + } + ] + }`, + md: metadata.Pairs("key-abc", "val-abc"), + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, + "DeniesRPCMatchInDenyAndAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": 
+ [ + { + "name": "allow_all", + "request": { + "paths": + [ + "*" + ] + } + } + ], + "deny_rules": + [ + { + "name": "deny_all", + "request": { + "paths": + [ + "*" + ] + } + } + ] + }`, + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, + "AllowsRPCNoMatchInDenyMatchInAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_all" + } + ], + "deny_rules": + [ + { + "name": "deny_TestServiceCalls", + "request": { + "paths": + [ + "/grpc.testing.TestService/UnaryCall", + "/grpc.testing.TestService/StreamingInputCall" + ], + "headers": + [ + { + "key": "key-abc", + "values": + [ + "val-abc", + "val-def" + ] + } + ] + } + } + ] + }`, + md: metadata.Pairs("key-xyz", "val-xyz"), + wantStatus: status.New(codes.OK, ""), + }, + "DeniesRPCNoMatchInDenyAndAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_some_user", + "source": { + "principals": + [ + "some_user" + ] + } + } + ], + "deny_rules": + [ + { + "name": "deny_StreamingOutputCall", + "request": { + "paths": + [ + "/grpc.testing.TestService/StreamingOutputCall" + ] + } + } + ] + }`, + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, + "AllowsRPCEmptyDenyMatchInAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_UnaryCall", + "request": + { + "paths": + [ + "/grpc.testing.TestService/UnaryCall" + ] + } + }, + { + "name": "allow_StreamingInputCall", + "request": + { + "paths": + [ + "/grpc.testing.TestService/StreamingInputCall" + ] + } + } + ] + }`, + wantStatus: status.New(codes.OK, ""), + }, + "DeniesRPCEmptyDenyNoMatchInAllow": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_StreamingOutputCall", + "request": + { + "paths": + [ + "/grpc.testing.TestService/StreamingOutputCall" + ] + } + } + ] + }`, + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, + 
"DeniesRPCRequestWithPrincipalsFieldOnUnauthenticatedConnection": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_authenticated", + "source": { + "principals": ["*", ""] + } + } + ] + }`, + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, + "DeniesRPCRequestNoMatchInAllowFailsPresenceMatch": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_TestServiceCalls", + "request": { + "paths": + [ + "/grpc.testing.TestService/*" + ], + "headers": + [ + { + "key": "key-abc", + "values": + [ + "*" + ] + } + ] + } + } + ] + }`, + md: metadata.Pairs("key-abc", ""), + wantStatus: status.New(codes.PermissionDenied, "unauthorized RPC request rejected"), + }, +} + +func (s) TestStaticPolicyEnd2End(t *testing.T) { + for name, test := range authzTests { + t.Run(name, func(t *testing.T) { + // Start a gRPC server with gRPC authz unary and stream server interceptors. + i, _ := authz.NewStatic(test.authzPolicy) + s := grpc.NewServer( + grpc.ChainUnaryInterceptor(i.UnaryInterceptor), + grpc.ChainStreamInterceptor(i.StreamInterceptor)) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + go s.Serve(lis) + + // Establish a connection to the server. + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := testgrpc.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + ctx = metadata.NewOutgoingContext(ctx, test.md) + + // Verifying authorization decision for Unary RPC. 
+ _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != test.wantStatus.Code() || got.Message() != test.wantStatus.Message() { + t.Fatalf("[UnaryCall] error want:{%v} got:{%v}", test.wantStatus.Err(), got.Err()) + } + + // Verifying authorization decision for Streaming RPC. + stream, err := client.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("failed StreamingInputCall err: %v", err) + } + req := &testpb.StreamingInputCallRequest{ + Payload: &testpb.Payload{ + Body: []byte("hi"), + }, + } + if err := stream.Send(req); err != nil && err != io.EOF { + t.Fatalf("failed stream.Send err: %v", err) + } + _, err = stream.CloseAndRecv() + if got := status.Convert(err); got.Code() != test.wantStatus.Code() || got.Message() != test.wantStatus.Message() { + t.Fatalf("[StreamingCall] error want:{%v} got:{%v}", test.wantStatus.Err(), got.Err()) + } + }) + } +} + +func (s) TestAllowsRPCRequestWithPrincipalsFieldOnTLSAuthenticatedConnection(t *testing.T) { + authzPolicy := `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_authenticated", + "source": { + "principals": ["*", ""] + } + } + ] + }` + // Start a gRPC server with gRPC authz unary server interceptor. + i, _ := authz.NewStatic(authzPolicy) + creds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) + if err != nil { + t.Fatalf("failed to generate credentials: %v", err) + } + s := grpc.NewServer( + grpc.Creds(creds), + grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + go s.Serve(lis) + + // Establish a connection to the server. 
+ creds, err = credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com") + if err != nil { + t.Fatalf("failed to load credentials: %v", err) + } + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(creds)) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := testgrpc.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Verifying authorization decision. + if _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("client.UnaryCall(_, _) = %v; want nil", err) + } +} + +func (s) TestAllowsRPCRequestWithPrincipalsFieldOnMTLSAuthenticatedConnection(t *testing.T) { + authzPolicy := `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_authenticated", + "source": { + "principals": ["*", ""] + } + } + ] + }` + // Start a gRPC server with gRPC authz unary server interceptor. 
+ i, _ := authz.NewStatic(authzPolicy) + cert, err := tls.LoadX509KeyPair(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(x509/server1_cert.pem, x509/server1_key.pem) failed: %v", err) + } + ca, err := os.ReadFile(testdata.Path("x509/client_ca_cert.pem")) + if err != nil { + t.Fatalf("os.ReadFile(x509/client_ca_cert.pem) failed: %v", err) + } + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(ca) { + t.Fatal("failed to append certificates") + } + creds := credentials.NewTLS(&tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + Certificates: []tls.Certificate{cert}, + ClientCAs: certPool, + }) + s := grpc.NewServer( + grpc.Creds(creds), + grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + go s.Serve(lis) + + // Establish a connection to the server. 
+ cert, err = tls.LoadX509KeyPair(testdata.Path("x509/client1_cert.pem"), testdata.Path("x509/client1_key.pem")) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(x509/client1_cert.pem, x509/client1_key.pem) failed: %v", err) + } + ca, err = os.ReadFile(testdata.Path("x509/server_ca_cert.pem")) + if err != nil { + t.Fatalf("os.ReadFile(x509/server_ca_cert.pem) failed: %v", err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(ca) { + t.Fatal("failed to append certificates") + } + creds = credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + ServerName: "x.test.example.com", + }) + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(creds)) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := testgrpc.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Verifying authorization decision. + if _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("client.UnaryCall(_, _) = %v; want nil", err) + } +} + +func (s) TestFileWatcherEnd2End(t *testing.T) { + for name, test := range authzTests { + t.Run(name, func(t *testing.T) { + file := createTmpPolicyFile(t, name, []byte(test.authzPolicy)) + i, _ := authz.NewFileWatcher(file, 1*time.Second) + defer i.Close() + + // Start a gRPC server with gRPC authz unary and stream server interceptors. + s := grpc.NewServer( + grpc.ChainUnaryInterceptor(i.UnaryInterceptor), + grpc.ChainStreamInterceptor(i.StreamInterceptor)) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + defer lis.Close() + go s.Serve(lis) + + // Establish a connection to the server. 
+ clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := testgrpc.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + ctx = metadata.NewOutgoingContext(ctx, test.md) + + // Verifying authorization decision for Unary RPC. + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != test.wantStatus.Code() || got.Message() != test.wantStatus.Message() { + t.Fatalf("[UnaryCall] error want:{%v} got:{%v}", test.wantStatus.Err(), got.Err()) + } + + // Verifying authorization decision for Streaming RPC. + stream, err := client.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("failed StreamingInputCall err: %v", err) + } + req := &testpb.StreamingInputCallRequest{ + Payload: &testpb.Payload{ + Body: []byte("hi"), + }, + } + if err := stream.Send(req); err != nil && err != io.EOF { + t.Fatalf("failed stream.Send err: %v", err) + } + _, err = stream.CloseAndRecv() + if got := status.Convert(err); got.Code() != test.wantStatus.Code() || got.Message() != test.wantStatus.Message() { + t.Fatalf("[StreamingCall] error want:{%v} got:{%v}", test.wantStatus.Err(), got.Err()) + } + }) + } +} + +func retryUntil(ctx context.Context, tsc testgrpc.TestServiceClient, want *status.Status) (lastErr error) { + for ctx.Err() == nil { + _, lastErr = tsc.UnaryCall(ctx, &testpb.SimpleRequest{}) + if s := status.Convert(lastErr); s.Code() == want.Code() && s.Message() == want.Message() { + return nil + } + time.Sleep(20 * time.Millisecond) + } + return lastErr +} + +func (s) TestFileWatcher_ValidPolicyRefresh(t *testing.T) { + valid1 := authzTests["DeniesRPCMatchInDenyAndAllow"] + file := createTmpPolicyFile(t, "valid_policy_refresh", []byte(valid1.authzPolicy)) + i, _ := authz.NewFileWatcher(file, 
100*time.Millisecond) + defer i.Close() + + // Start a gRPC server with gRPC authz unary server interceptor. + s := grpc.NewServer( + grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + defer lis.Close() + go s.Serve(lis) + + // Establish a connection to the server. + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := testgrpc.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Verifying authorization decision. + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid1.wantStatus.Err()) + } + + // Rewrite the file with a different valid authorization policy. + valid2 := authzTests["AllowsRPCEmptyDenyMatchInAllow"] + if err := os.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", file, err) + } + + // Verifying authorization decision. + if got := retryUntil(ctx, client, valid2.wantStatus); got != nil { + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got, valid2.wantStatus.Err()) + } +} + +func (s) TestFileWatcher_InvalidPolicySkipReload(t *testing.T) { + valid := authzTests["DeniesRPCMatchInDenyAndAllow"] + file := createTmpPolicyFile(t, "invalid_policy_skip_reload", []byte(valid.authzPolicy)) + i, _ := authz.NewFileWatcher(file, 20*time.Millisecond) + defer i.Close() + + // Start a gRPC server with gRPC authz unary server interceptors. 
+ s := grpc.NewServer( + grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + defer lis.Close() + go s.Serve(lis) + + // Establish a connection to the server. + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := testgrpc.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Verifying authorization decision. + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != valid.wantStatus.Code() || got.Message() != valid.wantStatus.Message() { + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid.wantStatus.Err()) + } + + // Skips the invalid policy update, and continues to use the valid policy. + if err := os.WriteFile(file, []byte("{}"), os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", file, err) + } + + // Wait 40 ms for background go routine to read updated files. + time.Sleep(40 * time.Millisecond) + + // Verifying authorization decision. 
+ _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != valid.wantStatus.Code() || got.Message() != valid.wantStatus.Message() { + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid.wantStatus.Err()) + } +} + +func (s) TestFileWatcher_RecoversFromReloadFailure(t *testing.T) { + valid1 := authzTests["DeniesRPCMatchInDenyAndAllow"] + file := createTmpPolicyFile(t, "recovers_from_reload_failure", []byte(valid1.authzPolicy)) + i, _ := authz.NewFileWatcher(file, 100*time.Millisecond) + defer i.Close() + + // Start a gRPC server with gRPC authz unary server interceptors. + s := grpc.NewServer( + grpc.ChainUnaryInterceptor(i.UnaryInterceptor)) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, &testServer{}) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + defer lis.Close() + go s.Serve(lis) + + // Establish a connection to the server. + clientConn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", lis.Addr().String(), err) + } + defer clientConn.Close() + client := testgrpc.NewTestServiceClient(clientConn) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Verifying authorization decision. + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid1.wantStatus.Err()) + } + + // Skips the invalid policy update, and continues to use the valid policy. + if err := os.WriteFile(file, []byte("{}"), os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", file, err) + } + + // Wait 120 ms for background go routine to read updated files. 
+ time.Sleep(120 * time.Millisecond) + + // Verifying authorization decision. + _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}) + if got := status.Convert(err); got.Code() != valid1.wantStatus.Code() || got.Message() != valid1.wantStatus.Message() { + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got.Err(), valid1.wantStatus.Err()) + } + + // Rewrite the file with a different valid authorization policy. + valid2 := authzTests["AllowsRPCEmptyDenyMatchInAllow"] + if err := os.WriteFile(file, []byte(valid2.authzPolicy), os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", file, err) + } + + // Verifying authorization decision. + if got := retryUntil(ctx, client, valid2.wantStatus); got != nil { + t.Fatalf("client.UnaryCall(_, _) = %v; want = %v", got, valid2.wantStatus.Err()) + } +} diff --git a/authz/grpc_authz_server_interceptors.go b/authz/grpc_authz_server_interceptors.go new file mode 100644 index 000000000000..3e5f598a97d1 --- /dev/null +++ b/authz/grpc_authz_server_interceptors.go @@ -0,0 +1,178 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package authz
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"sync/atomic"
+	"time"
+	"unsafe"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/xds/rbac"
+	"google.golang.org/grpc/status"
+)
+
+var logger = grpclog.Component("authz")
+
+// StaticInterceptor contains engines used to make authorization decisions. It
+// either contains two engines, a deny engine followed by an allow engine, or
+// only one allow engine.
+type StaticInterceptor struct {
+	engines rbac.ChainEngine
+}
+
+// NewStatic returns a new StaticInterceptor from a static authorization policy
+// JSON string.
+func NewStatic(authzPolicy string) (*StaticInterceptor, error) {
+	rbacs, policyName, err := translatePolicy(authzPolicy)
+	if err != nil {
+		return nil, err
+	}
+	chainEngine, err := rbac.NewChainEngine(rbacs, policyName)
+	if err != nil {
+		return nil, err
+	}
+	return &StaticInterceptor{*chainEngine}, nil
+}
+
+// UnaryInterceptor intercepts incoming Unary RPC requests.
+// Only authorized requests are allowed to pass. Otherwise, an unauthorized
+// error is returned to the client.
+func (i *StaticInterceptor) UnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	err := i.engines.IsAuthorized(ctx)
+	if err != nil {
+		if status.Code(err) == codes.PermissionDenied {
+			if logger.V(2) {
+				logger.Infof("unauthorized RPC request rejected: %v", err)
+			}
+			return nil, status.Errorf(codes.PermissionDenied, "unauthorized RPC request rejected")
+		}
+		return nil, err
+	}
+	return handler(ctx, req)
+}
+
+// StreamInterceptor intercepts incoming Stream RPC requests.
+// Only authorized requests are allowed to pass. Otherwise, an unauthorized
+// error is returned to the client.
+func (i *StaticInterceptor) StreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	err := i.engines.IsAuthorized(ss.Context())
+	if err != nil {
+		if status.Code(err) == codes.PermissionDenied {
+			if logger.V(2) {
+				logger.Infof("unauthorized RPC request rejected: %v", err)
+			}
+			return status.Errorf(codes.PermissionDenied, "unauthorized RPC request rejected")
+		}
+		return err
+	}
+	return handler(srv, ss)
+}
+
+// FileWatcherInterceptor contains details used to make authorization decisions
+// by watching a file path that contains authorization policy in JSON format.
+type FileWatcherInterceptor struct {
+	internalInterceptor unsafe.Pointer // *StaticInterceptor
+	policyFile          string
+	policyContents      []byte
+	refreshDuration     time.Duration
+	cancel              context.CancelFunc
+}
+
+// NewFileWatcher returns a new FileWatcherInterceptor from a policy file
+// that contains a JSON string of the authorization policy and a refresh
+// duration to specify the amount of time between policy refreshes.
+func NewFileWatcher(file string, duration time.Duration) (*FileWatcherInterceptor, error) {
+	if file == "" {
+		return nil, fmt.Errorf("authorization policy file path is empty")
+	}
+	if duration <= time.Duration(0) {
+		return nil, fmt.Errorf("requires refresh interval(%v) greater than 0s", duration)
+	}
+	i := &FileWatcherInterceptor{policyFile: file, refreshDuration: duration}
+	if err := i.updateInternalInterceptor(); err != nil {
+		return nil, err
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	i.cancel = cancel
+	// Create a background goroutine for policy refresh.
+	go i.run(ctx)
+	return i, nil
+}
+
+func (i *FileWatcherInterceptor) run(ctx context.Context) {
+	ticker := time.NewTicker(i.refreshDuration)
+	for {
+		if err := i.updateInternalInterceptor(); err != nil {
+			logger.Warningf("authorization policy reload status err: %v", err)
+		}
+		select {
+		case <-ctx.Done():
+			ticker.Stop()
+			return
+		case <-ticker.C:
+		}
+	}
+}
+
+// updateInternalInterceptor checks if the policy file that it is watching has changed,
+// and if so, updates the internalInterceptor with the policy. Unlike the
+// constructor, if there is an error in reading the file or parsing the policy, the
+// previous internalInterceptor will not be replaced.
+func (i *FileWatcherInterceptor) updateInternalInterceptor() error {
+	policyContents, err := os.ReadFile(i.policyFile)
+	if err != nil {
+		return fmt.Errorf("policyFile(%s) read failed: %v", i.policyFile, err)
+	}
+	if bytes.Equal(i.policyContents, policyContents) {
+		return nil
+	}
+	i.policyContents = policyContents
+	policyContentsString := string(policyContents)
+	interceptor, err := NewStatic(policyContentsString)
+	if err != nil {
+		return err
+	}
+	atomic.StorePointer(&i.internalInterceptor, unsafe.Pointer(interceptor))
+	logger.Infof("authorization policy reload status: successfully loaded new policy %v", policyContentsString)
+	return nil
+}
+
+// Close cleans up resources allocated by the interceptor.
+func (i *FileWatcherInterceptor) Close() {
+	i.cancel()
+}
+
+// UnaryInterceptor intercepts incoming Unary RPC requests.
+// Only authorized requests are allowed to pass. Otherwise, an unauthorized
+// error is returned to the client.
+func (i *FileWatcherInterceptor) UnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	return ((*StaticInterceptor)(atomic.LoadPointer(&i.internalInterceptor))).UnaryInterceptor(ctx, req, info, handler)
+}
+
+// StreamInterceptor intercepts incoming Stream RPC requests.
+// Only authorized requests are allowed to pass. Otherwise, an unauthorized +// error is returned to the client. +func (i *FileWatcherInterceptor) StreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return ((*StaticInterceptor)(atomic.LoadPointer(&i.internalInterceptor))).StreamInterceptor(srv, ss, info, handler) +} diff --git a/authz/grpc_authz_server_interceptors_test.go b/authz/grpc_authz_server_interceptors_test.go new file mode 100644 index 000000000000..ae74c896d960 --- /dev/null +++ b/authz/grpc_authz_server_interceptors_test.go @@ -0,0 +1,120 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package authz_test + +import ( + "fmt" + "os" + "path" + "testing" + "time" + + "google.golang.org/grpc/authz" +) + +func createTmpPolicyFile(t *testing.T, dirSuffix string, policy []byte) string { + t.Helper() + + // Create a temp directory. Passing an empty string for the first argument + // uses the system temp directory. + dir, err := os.MkdirTemp("", dirSuffix) + if err != nil { + t.Fatalf("os.MkdirTemp() failed: %v", err) + } + t.Logf("Using tmpdir: %s", dir) + // Write policy into file. 
+ filename := path.Join(dir, "policy.json") + if err := os.WriteFile(filename, policy, os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", filename, err) + } + t.Logf("Wrote policy %s to file at %s", string(policy), filename) + return filename +} + +func (s) TestNewStatic(t *testing.T) { + tests := map[string]struct { + authzPolicy string + wantErr error + }{ + "InvalidPolicyFailsToCreateInterceptor": { + authzPolicy: `{}`, + wantErr: fmt.Errorf(`"name" is not present`), + }, + "ValidPolicyCreatesInterceptor": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_all" + } + ] + }`, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if _, err := authz.NewStatic(test.authzPolicy); fmt.Sprint(err) != fmt.Sprint(test.wantErr) { + t.Fatalf("NewStatic(%v) returned err: %v, want err: %v", test.authzPolicy, err, test.wantErr) + } + }) + } +} + +func (s) TestNewFileWatcher(t *testing.T) { + tests := map[string]struct { + authzPolicy string + refreshDuration time.Duration + wantErr error + }{ + "InvalidRefreshDurationFailsToCreateInterceptor": { + refreshDuration: time.Duration(0), + wantErr: fmt.Errorf("requires refresh interval(0s) greater than 0s"), + }, + "InvalidPolicyFailsToCreateInterceptor": { + authzPolicy: `{}`, + refreshDuration: time.Duration(1), + wantErr: fmt.Errorf(`"name" is not present`), + }, + "ValidPolicyCreatesInterceptor": { + authzPolicy: `{ + "name": "authz", + "allow_rules": + [ + { + "name": "allow_all" + } + ] + }`, + refreshDuration: time.Duration(1), + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + file := createTmpPolicyFile(t, name, []byte(test.authzPolicy)) + i, err := authz.NewFileWatcher(file, test.refreshDuration) + if fmt.Sprint(err) != fmt.Sprint(test.wantErr) { + t.Fatalf("NewFileWatcher(%v) returned err: %v, want err: %v", test.authzPolicy, err, test.wantErr) + } + if i != nil { + i.Close() + } + }) + } +} diff --git 
a/authz/rbac_translator.go b/authz/rbac_translator.go new file mode 100644 index 000000000000..730ec9dc426a --- /dev/null +++ b/authz/rbac_translator.go @@ -0,0 +1,398 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package authz exposes methods to manage authorization within gRPC. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed +// in a later release. +package authz + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" +) + +// This is used when converting a custom config from raw JSON to a TypedStruct +// The TypeURL of the TypeStruct will be "grpc.authz.audit_logging/" +const typeURLPrefix = "grpc.authz.audit_logging/" + +type header struct { + Key string + Values []string +} + +type peer struct { + Principals []string +} + +type request struct { + Paths []string + Headers []header +} + +type rule struct { + Name string + Source 
peer + Request request +} + +type auditLogger struct { + Name string `json:"name"` + Config *structpb.Struct `json:"config"` + IsOptional bool `json:"is_optional"` +} + +type auditLoggingOptions struct { + AuditCondition string `json:"audit_condition"` + AuditLoggers []*auditLogger `json:"audit_loggers"` +} + +// Represents the SDK authorization policy provided by user. +type authorizationPolicy struct { + Name string + DenyRules []rule `json:"deny_rules"` + AllowRules []rule `json:"allow_rules"` + AuditLoggingOptions auditLoggingOptions `json:"audit_logging_options"` +} + +func principalOr(principals []*v3rbacpb.Principal) *v3rbacpb.Principal { + return &v3rbacpb.Principal{ + Identifier: &v3rbacpb.Principal_OrIds{ + OrIds: &v3rbacpb.Principal_Set{ + Ids: principals, + }, + }, + } +} + +func permissionOr(permission []*v3rbacpb.Permission) *v3rbacpb.Permission { + return &v3rbacpb.Permission{ + Rule: &v3rbacpb.Permission_OrRules{ + OrRules: &v3rbacpb.Permission_Set{ + Rules: permission, + }, + }, + } +} + +func permissionAnd(permission []*v3rbacpb.Permission) *v3rbacpb.Permission { + return &v3rbacpb.Permission{ + Rule: &v3rbacpb.Permission_AndRules{ + AndRules: &v3rbacpb.Permission_Set{ + Rules: permission, + }, + }, + } +} + +func getStringMatcher(value string) *v3matcherpb.StringMatcher { + switch { + case value == "*": + return &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{ + SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + } + case strings.HasSuffix(value, "*"): + prefix := strings.TrimSuffix(value, "*") + return &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: prefix}, + } + case strings.HasPrefix(value, "*"): + suffix := strings.TrimPrefix(value, "*") + return &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: suffix}, + } + default: + return &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: value}, + } + } +} + 
+func getHeaderMatcher(key, value string) *v3routepb.HeaderMatcher { + switch { + case value == "*": + return &v3routepb.HeaderMatcher{ + Name: key, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{ + SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + } + case strings.HasSuffix(value, "*"): + prefix := strings.TrimSuffix(value, "*") + return &v3routepb.HeaderMatcher{ + Name: key, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: prefix}, + } + case strings.HasPrefix(value, "*"): + suffix := strings.TrimPrefix(value, "*") + return &v3routepb.HeaderMatcher{ + Name: key, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: suffix}, + } + default: + return &v3routepb.HeaderMatcher{ + Name: key, + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: value}, + } + } +} + +func parsePrincipalNames(principalNames []string) []*v3rbacpb.Principal { + ps := make([]*v3rbacpb.Principal, 0, len(principalNames)) + for _, principalName := range principalNames { + newPrincipalName := &v3rbacpb.Principal{ + Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{ + PrincipalName: getStringMatcher(principalName), + }, + }} + ps = append(ps, newPrincipalName) + } + return ps +} + +func parsePeer(source peer) *v3rbacpb.Principal { + if len(source.Principals) == 0 { + return &v3rbacpb.Principal{ + Identifier: &v3rbacpb.Principal_Any{ + Any: true, + }, + } + } + return principalOr(parsePrincipalNames(source.Principals)) +} + +func parsePaths(paths []string) []*v3rbacpb.Permission { + ps := make([]*v3rbacpb.Permission, 0, len(paths)) + for _, path := range paths { + newPath := &v3rbacpb.Permission{ + Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{ + Rule: &v3matcherpb.PathMatcher_Path{Path: getStringMatcher(path)}}}} + ps = append(ps, newPath) + } + return ps +} + +func parseHeaderValues(key string, values []string) []*v3rbacpb.Permission 
{ + vs := make([]*v3rbacpb.Permission, 0, len(values)) + for _, value := range values { + newHeader := &v3rbacpb.Permission{ + Rule: &v3rbacpb.Permission_Header{ + Header: getHeaderMatcher(key, value)}} + vs = append(vs, newHeader) + } + return vs +} + +var unsupportedHeaders = map[string]bool{ + "host": true, + "connection": true, + "keep-alive": true, + "proxy-authenticate": true, + "proxy-authorization": true, + "te": true, + "trailer": true, + "transfer-encoding": true, + "upgrade": true, +} + +func unsupportedHeader(key string) bool { + return key[0] == ':' || strings.HasPrefix(key, "grpc-") || unsupportedHeaders[key] +} + +func parseHeaders(headers []header) ([]*v3rbacpb.Permission, error) { + hs := make([]*v3rbacpb.Permission, 0, len(headers)) + for i, header := range headers { + if header.Key == "" { + return nil, fmt.Errorf(`"headers" %d: "key" is not present`, i) + } + header.Key = strings.ToLower(header.Key) + if unsupportedHeader(header.Key) { + return nil, fmt.Errorf(`"headers" %d: unsupported "key" %s`, i, header.Key) + } + if len(header.Values) == 0 { + return nil, fmt.Errorf(`"headers" %d: "values" is not present`, i) + } + values := parseHeaderValues(header.Key, header.Values) + hs = append(hs, permissionOr(values)) + } + return hs, nil +} + +func parseRequest(request request) (*v3rbacpb.Permission, error) { + var and []*v3rbacpb.Permission + if len(request.Paths) > 0 { + and = append(and, permissionOr(parsePaths(request.Paths))) + } + if len(request.Headers) > 0 { + headers, err := parseHeaders(request.Headers) + if err != nil { + return nil, err + } + and = append(and, permissionAnd(headers)) + } + if len(and) > 0 { + return permissionAnd(and), nil + } + return &v3rbacpb.Permission{ + Rule: &v3rbacpb.Permission_Any{ + Any: true, + }, + }, nil +} + +func parseRules(rules []rule, prefixName string) (map[string]*v3rbacpb.Policy, error) { + policies := make(map[string]*v3rbacpb.Policy) + for i, rule := range rules { + if rule.Name == "" { + return 
policies, fmt.Errorf(`%d: "name" is not present`, i) + } + permission, err := parseRequest(rule.Request) + if err != nil { + return nil, fmt.Errorf("%d: %v", i, err) + } + policyName := prefixName + "_" + rule.Name + policies[policyName] = &v3rbacpb.Policy{ + Principals: []*v3rbacpb.Principal{parsePeer(rule.Source)}, + Permissions: []*v3rbacpb.Permission{permission}, + } + } + return policies, nil +} + +// toProtos parses the auditLoggingOptions into the associated RBAC protos. The single +// auditLoggingOptions results in two different parsed protos, one for the allow +// policy and one for the deny policy. +func (options *auditLoggingOptions) toProtos() (allow *v3rbacpb.RBAC_AuditLoggingOptions, deny *v3rbacpb.RBAC_AuditLoggingOptions, err error) { + allow = &v3rbacpb.RBAC_AuditLoggingOptions{} + deny = &v3rbacpb.RBAC_AuditLoggingOptions{} + + if options.AuditCondition != "" { + rbacCondition, ok := v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition_value[options.AuditCondition] + if !ok { + return nil, nil, fmt.Errorf("failed to parse AuditCondition %v. 
Allowed values {NONE, ON_DENY, ON_ALLOW, ON_DENY_AND_ALLOW}", options.AuditCondition) + } + allow.AuditCondition = v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition(rbacCondition) + deny.AuditCondition = toDenyCondition(v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition(rbacCondition)) + } + + for i, config := range options.AuditLoggers { + if config.Name == "" { + return nil, nil, fmt.Errorf("missing required field: name in audit_logging_options.audit_loggers[%v]", i) + } + if config.Config == nil { + config.Config = &structpb.Struct{} + } + typedStruct := &v1xdsudpatypepb.TypedStruct{ + TypeUrl: typeURLPrefix + config.Name, + Value: config.Config, + } + customConfig, err := anypb.New(typedStruct) + if err != nil { + return nil, nil, fmt.Errorf("error parsing custom audit logger config: %v", err) + } + + logger := &v3corepb.TypedExtensionConfig{Name: config.Name, TypedConfig: customConfig} + rbacConfig := v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + IsOptional: config.IsOptional, + AuditLogger: logger, + } + allow.LoggerConfigs = append(allow.LoggerConfigs, &rbacConfig) + deny.LoggerConfigs = append(deny.LoggerConfigs, &rbacConfig) + } + + return allow, deny, nil +} + +// Maps the AuditCondition coming from AuditLoggingOptions to the proper +// condition for the deny policy RBAC proto +func toDenyCondition(condition v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition) v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition { + // Mapping the overall policy AuditCondition to what it must be for the Deny and Allow RBAC + // See gRPC A59 for details - https://github.com/grpc/proposal/pull/346/files + // |Authorization Policy |DENY RBAC |ALLOW RBAC | + // |----------------------|-------------------|---------------------| + // |NONE |NONE |NONE | + // |ON_DENY |ON_DENY |ON_DENY | + // |ON_ALLOW |NONE |ON_ALLOW | + // |ON_DENY_AND_ALLOW |ON_DENY |ON_DENY_AND_ALLOW | + switch condition { + case v3rbacpb.RBAC_AuditLoggingOptions_NONE: + return 
v3rbacpb.RBAC_AuditLoggingOptions_NONE + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY: + return v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY + case v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW: + return v3rbacpb.RBAC_AuditLoggingOptions_NONE + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW: + return v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY + default: + return v3rbacpb.RBAC_AuditLoggingOptions_NONE + } +} + +// translatePolicy translates SDK authorization policy in JSON format to two +// Envoy RBAC polices (deny followed by allow policy) or only one Envoy RBAC +// allow policy. Also returns the overall policy name. If the input policy +// cannot be parsed or is invalid, an error will be returned. +func translatePolicy(policyStr string) ([]*v3rbacpb.RBAC, string, error) { + policy := &authorizationPolicy{} + d := json.NewDecoder(bytes.NewReader([]byte(policyStr))) + d.DisallowUnknownFields() + if err := d.Decode(policy); err != nil { + return nil, "", fmt.Errorf("failed to unmarshal policy: %v", err) + } + if policy.Name == "" { + return nil, "", fmt.Errorf(`"name" is not present`) + } + if len(policy.AllowRules) == 0 { + return nil, "", fmt.Errorf(`"allow_rules" is not present`) + } + allowLogger, denyLogger, err := policy.AuditLoggingOptions.toProtos() + if err != nil { + return nil, "", err + } + rbacs := make([]*v3rbacpb.RBAC, 0, 2) + if len(policy.DenyRules) > 0 { + denyPolicies, err := parseRules(policy.DenyRules, policy.Name) + if err != nil { + return nil, "", fmt.Errorf(`"deny_rules" %v`, err) + } + denyRBAC := &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_DENY, + Policies: denyPolicies, + AuditLoggingOptions: denyLogger, + } + rbacs = append(rbacs, denyRBAC) + } + allowPolicies, err := parseRules(policy.AllowRules, policy.Name) + if err != nil { + return nil, "", fmt.Errorf(`"allow_rules" %v`, err) + } + allowRBAC := &v3rbacpb.RBAC{Action: v3rbacpb.RBAC_ALLOW, Policies: allowPolicies, AuditLoggingOptions: allowLogger} + return append(rbacs, allowRBAC), 
policy.Name, nil +} diff --git a/authz/rbac_translator_test.go b/authz/rbac_translator_test.go new file mode 100644 index 000000000000..23b6fb669e9c --- /dev/null +++ b/authz/rbac_translator_test.go @@ -0,0 +1,1052 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package authz + +import ( + "strings" + "testing" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" +) + +func TestTranslatePolicy(t *testing.T) { + tests := map[string]struct { + authzPolicy string + wantErr string + wantPolicies []*v3rbacpb.RBAC + wantPolicyName string + }{ + "valid policy": { + authzPolicy: `{ + "name": "authz", + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc", + "spiffe://bar*", + "*baz", + "spiffe://abc.*.com" + ] + } + }], + "allow_rules": [ + { + "name": "allow_policy_1", + "source": { + "principals":["*"] 
+ }, + "request": { + "paths": ["path-foo*"] + } + }, + { + "name": "allow_policy_2", + "request": { + "paths": [ + "path-bar", + "*baz" + ], + "headers": [ + { + "key": "key-1", + "values": ["foo", "*bar"] + }, + { + "key": "key-2", + "values": ["baz*"] + } + ] + } + }] + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "spiffe://bar"}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: "baz"}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://abc.*.com"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{}, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: 
&v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "path-foo"}, + }}}, + }}, + }, + }}}, + }, + }}}, + }, + }, + "authz_allow_policy_2": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "path-bar"}, + }}}, + }}, + {Rule: &v3rbacpb.Permission_UrlPath{ + UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: "baz"}, + }}}, + }}, + }, + }}}, + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{ + Header: &v3routepb.HeaderMatcher{ + Name: "key-1", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "foo"}, + }, + }}, + {Rule: &v3rbacpb.Permission_Header{ + 
Header: &v3routepb.HeaderMatcher{ + Name: "key-1", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: "bar"}, + }, + }}, + }, + }}}, + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{ + Header: &v3routepb.HeaderMatcher{ + Name: "key-2", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "baz"}, + }, + }}, + }, + }}}, + }, + }}}, + }, + }}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{}, + }, + }, + wantPolicyName: "authz", + }, + "allow authenticated": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }] + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{}, + }, + }, + }, + "audit_logging_ALLOW empty config": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + 
"spiffe://foo.abc" + ] + } + }], + "audit_logging_options": { + "audit_condition": "ON_ALLOW", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: 
&v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "audit_logging_DENY_AND_ALLOW": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc" + ] + } + }], + "audit_logging_options": { + "audit_condition": "ON_DENY_AND_ALLOW", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + 
"authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "audit_logging_NONE": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc" + ] + } + }], + "audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: 
&v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "audit_logging_custom_config simple": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + 
"name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc" + ] + } + }], + "audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {"abc":123, "xyz":"123"}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": "123"}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: 
&v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": "123"}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "audit_logging_custom_config nested": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {"abc":123, "xyz":{"abc":123}}, + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + 
AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{"abc": 123, "xyz": map[string]interface{}{"abc": 123}}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "missing audit logger config": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "audit_logging_options": { + "audit_condition": "NONE" + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{}, + }, + }, + }, + }, + "missing audit condition": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "audit_logging_options": { + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {}, + "is_optional": 
false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "missing custom config audit logger": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "deny_rules": [ + { + "name": "deny_policy_1", + "source": { + "principals":[ + "spiffe://foo.abc" + ] + } + }], + "audit_logging_options": { + "audit_condition": "ON_DENY", + "audit_loggers": [ + { + "name": "stdout_logger", + "is_optional": false + } + ] + } + }`, + wantPolicies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "authz_deny_policy_1": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: 
[]*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "spiffe://foo.abc"}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "authz_allow_authenticated": { + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: ".+"}}, + }}, + }}, + {Identifier: &v3rbacpb.Principal_Authenticated_{ + Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{ + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}, + }}, + }}, + }, + }}}, + }, + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{Name: "stdout_logger", TypedConfig: anyPbHelper(t, map[string]interface{}{}, "stdout_logger")}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + "unknown 
field": { + authzPolicy: `{"random": 123}`, + wantErr: "failed to unmarshal policy", + }, + "missing name field": { + authzPolicy: `{}`, + wantErr: `"name" is not present`, + }, + "invalid field type": { + authzPolicy: `{"name": 123}`, + wantErr: "failed to unmarshal policy", + }, + "missing allow rules field": { + authzPolicy: `{"name": "authz-foo"}`, + wantErr: `"allow_rules" is not present`, + }, + "missing rule name field": { + authzPolicy: `{ + "name": "authz-foo", + "allow_rules": [{}] + }`, + wantErr: `"allow_rules" 0: "name" is not present`, + }, + "missing header key": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [{ + "name": "allow_policy_1", + "request": {"headers":[{"key":"key-a", "values": ["value-a"]}, {}]} + }] + }`, + wantErr: `"allow_rules" 0: "headers" 1: "key" is not present`, + }, + "missing header values": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [{ + "name": "allow_policy_1", + "request": {"headers":[{"key":"key-a"}]} + }] + }`, + wantErr: `"allow_rules" 0: "headers" 0: "values" is not present`, + }, + "unsupported header": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [{ + "name": "allow_policy_1", + "request": {"headers":[{"key":":method", "values":["GET"]}]} + }] + }`, + wantErr: `"allow_rules" 0: "headers" 0: unsupported "key" :method`, + }, + "bad audit condition": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "audit_logging_options": { + "audit_condition": "ABC", + "audit_loggers": [ + { + "name": "stdout_logger", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantErr: `failed to parse AuditCondition ABC`, + }, + "bad audit logger config": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "stdout_logger", + 
"config": "abc", + "is_optional": false + } + ] + } + }`, + wantErr: `failed to unmarshal policy`, + }, + "missing audit logger name": { + authzPolicy: `{ + "name": "authz", + "allow_rules": [ + { + "name": "allow_authenticated", + "source": { + "principals":["*", ""] + } + }], + "audit_logging_options": { + "audit_condition": "NONE", + "audit_loggers": [ + { + "name": "", + "config": {}, + "is_optional": false + } + ] + } + }`, + wantErr: `missing required field: name`, + }, + } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + gotPolicies, gotPolicyName, gotErr := translatePolicy(test.authzPolicy) + if gotErr != nil && !strings.HasPrefix(gotErr.Error(), test.wantErr) { + t.Fatalf("unexpected error\nwant:%v\ngot:%v", test.wantErr, gotErr) + } + if diff := cmp.Diff(gotPolicies, test.wantPolicies, protocmp.Transform()); diff != "" { + t.Fatalf("unexpected policy\ndiff (-want +got):\n%s", diff) + } + if test.wantPolicyName != "" && gotPolicyName != test.wantPolicyName { + t.Fatalf("unexpected policy name\nwant:%v\ngot:%v", test.wantPolicyName, gotPolicyName) + } + }) + } +} + +func anyPbHelper(t *testing.T, in map[string]interface{}, name string) *anypb.Any { + t.Helper() + pb, err := structpb.NewStruct(in) + typedStruct := &v1xdsudpatypepb.TypedStruct{ + TypeUrl: typeURLPrefix + name, + Value: pb, + } + if err != nil { + t.Fatal(err) + } + customConfig, err := anypb.New(typedStruct) + if err != nil { + t.Fatal(err) + } + return customConfig +} diff --git a/backoff.go b/backoff.go index 542594f5cc51..29475e31c979 100644 --- a/backoff.go +++ b/backoff.go @@ -48,7 +48,7 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
diff --git a/balancer/balancer.go b/balancer/balancer.go index ab531f4c0b80..8f00523c0e24 100644 --- a/balancer/balancer.go +++ b/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -75,24 +76,26 @@ func Get(name string) Builder { return nil } -// SubConn represents a gRPC sub connection. -// Each sub connection contains a list of addresses. gRPC will -// try to connect to them (in sequence), and stop trying the -// remainder once one connection is successful. +// A SubConn represents a single connection to a gRPC backend service. // -// The reconnect backoff will be applied on the list, not a single address. -// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// Each SubConn contains a list of addresses. // -// All SubConns start in IDLE, and will not try to connect. To trigger -// the connecting, Balancers must call Connect. -// When the connection encounters an error, it will reconnect immediately. -// When the connection becomes IDLE, it will not reconnect unless Connect is -// called. +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. // -// This interface is to be implemented by gRPC. Users should not need a -// brand new implementation of this interface. For the situations like -// testing, the new implementation should embed this interface. This allows -// gRPC to add new methods to this interface. +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. 
+// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. @@ -107,6 +110,11 @@ type SubConn interface { UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) } // NewSubConnOptions contains options to create new SubConn. @@ -172,25 +180,32 @@ type ClientConn interface { // BuildOptions contains additional information for Build. type BuildOptions struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. DialCreds credentials.TransportCredentials - // CredsBundle is the credentials bundle that the Balancer can use. + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. 
Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. CredsBundle credentials.Bundle - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. Dialer func(context.Context, string) (net.Conn, error) - // ChannelzParentID is the entity parent's channelz unique identification number. - ChannelzParentID int64 + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. CustomUserAgent string - // Target contains the parsed address info of the dial target. It is the same resolver.Target as - // passed to the resolver. - // See the documentation for the resolver.Target type for details about what it contains. + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. Target resolver.Target } @@ -234,7 +249,7 @@ type DoneInfo struct { // ServerLoad is the load received from server. It's usually sent as part of // trailing metadata. // - // The only supported type now is *orca_v1.LoadReport. 
+ // The only supported type now is *orca_v3.LoadReport. ServerLoad interface{} } @@ -264,6 +279,14 @@ type PickResult struct { // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metadata metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and @@ -326,6 +349,20 @@ type Balancer interface { Close() } +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + // SubConnState describes the state of a SubConn. type SubConnState struct { // ConnectivityState is the connectivity state of the SubConn. @@ -348,41 +385,20 @@ type ClientConnState struct { // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") -// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns -// and returns one aggregated connectivity state. -// -// It's not thread safe. -type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. 
- numConnecting uint64 // Number of addrConns in connecting state. +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) } -// RecordTransition records state change happening in subConn and based on that -// it evaluates what aggregated state should be. -// -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else the aggregated state is TransientFailure. -// -// Idle and Shutdown are not considered. -func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { - // Update counters. - for idx, state := range []connectivity.State{oldState, newState} { - updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. - switch state { - case connectivity.Ready: - cse.numReady += updateVal - case connectivity.Connecting: - cse.numConnecting += updateVal - } - } - - // Evaluate. - if cse.numReady > 0 { - return connectivity.Ready - } - if cse.numConnecting > 0 { - return connectivity.Connecting - } - return connectivity.TransientFailure +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. 
+type Producer interface { } diff --git a/balancer/base/balancer.go b/balancer/base/balancer.go index c883efa0bbf5..3929c26d31e1 100644 --- a/balancer/base/balancer.go +++ b/balancer/base/balancer.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" @@ -42,10 +41,11 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: make(map[resolver.Address]subConnInfo), + subConns: resolver.NewAddressMap(), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, + state: connectivity.Connecting, } // Initialize picker to a picker that always returns // ErrNoSubConnAvailable, because when state of a SubConn changes, we @@ -58,11 +58,6 @@ func (bb *baseBuilder) Name() string { return bb.name } -type subConnInfo struct { - subConn balancer.SubConn - attrs *attributes.Attributes -} - type baseBalancer struct { cc balancer.ClientConn pickerBuilder PickerBuilder @@ -70,7 +65,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses) + subConns *resolver.AddressMap scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -81,7 +76,7 @@ type baseBalancer struct { func (b *baseBalancer) ResolverError(err error) { b.resolverErr = err - if len(b.subConns) == 0 { + if b.subConns.Len() == 0 { b.state = connectivity.TransientFailure } @@ -105,52 +100,29 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. 
- addrsSet := make(map[resolver.Address]struct{}) + addrsSet := resolver.NewAddressMap() for _, a := range s.ResolverState.Addresses { - // Strip attributes from addresses before using them as map keys. So - // that when two addresses only differ in attributes pointers (but with - // the same attribute content), they are considered the same address. - // - // Note that this doesn't handle the case where the attribute content is - // different. So if users want to set different attributes to create - // duplicate connections to the same backend, it doesn't work. This is - // fine for now, because duplicate is done by setting Metadata today. - // - // TODO: read attributes to handle duplicate connections. - aNoAttrs := a - aNoAttrs.Attributes = nil - addrsSet[aNoAttrs] = struct{}{} - if scInfo, ok := b.subConns[aNoAttrs]; !ok { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - // - // When creating SubConn, the original address with attributes is - // passed through. So that connection configurations in attributes - // (like creds) will be used. sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue } - b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes} + b.subConns.Set(a, sc) b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) sc.Connect() - } else { - // Always update the subconn's address in case the attributes - // changed. - // - // The SubConn does a reflect.DeepEqual of the new and old - // addresses. So this is a noop if the current address is the same - // as the old one (including attributes). 
- scInfo.attrs = a.Attributes - b.subConns[aNoAttrs] = scInfo - b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a}) } } - for a, scInfo := range b.subConns { + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) // a was removed by resolver. - if _, ok := addrsSet[a]; !ok { - b.cc.RemoveSubConn(scInfo.subConn) - delete(b.subConns, a) + if _, ok := addrsSet.Get(a); !ok { + b.cc.RemoveSubConn(sc) + b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. } @@ -163,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) return nil } @@ -182,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) @@ -192,10 +167,11 @@ func (b *baseBalancer) regeneratePicker() { readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. 
- for addr, scInfo := range b.subConns { - if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready { - addr.Attributes = scInfo.attrs - readySCs[scInfo.subConn] = SubConnInfo{Address: addr} + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} } } b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) @@ -213,10 +189,14 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } - if oldS == connectivity.TransientFailure && s == connectivity.Connecting { - // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or // CONNECTING transitions to prevent the aggregated state from being // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } return } b.scStates[sc] = s @@ -242,7 +222,6 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su b.state == connectivity.TransientFailure { b.regeneratePicker() } - b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } @@ -251,6 +230,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + // NewErrPicker returns a Picker that always returns err on Pick(). 
func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} diff --git a/balancer/base/balancer_test.go b/balancer/base/balancer_test.go index f8ff8cf98444..b50abf8526e6 100644 --- a/balancer/base/balancer_test.go +++ b/balancer/base/balancer_test.go @@ -44,6 +44,10 @@ func (sc *testSubConn) UpdateAddresses(addresses []resolver.Address) {} func (sc *testSubConn) Connect() {} +func (sc *testSubConn) GetOrBuildProducer(balancer.ProducerBuilder) (balancer.Producer, func()) { + return nil, nil +} + // testPickBuilder creates balancer.Picker for test. type testPickBuilder struct { validate func(info PickerBuildInfo) @@ -54,34 +58,6 @@ func (p *testPickBuilder) Build(info PickerBuildInfo) balancer.Picker { return nil } -func TestBaseBalancerStripAttributes(t *testing.T) { - b := (&baseBuilder{}).Build(&testClientConn{ - newSubConn: func(addrs []resolver.Address, _ balancer.NewSubConnOptions) (balancer.SubConn, error) { - for _, addr := range addrs { - if addr.Attributes == nil { - t.Errorf("in NewSubConn, got address %+v with nil attributes, want not nil", addr) - } - } - return &testSubConn{}, nil - }, - }, balancer.BuildOptions{}).(*baseBalancer) - - b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: []resolver.Address{ - {Addr: "1.1.1.1", Attributes: &attributes.Attributes{}}, - {Addr: "2.2.2.2", Attributes: &attributes.Attributes{}}, - }, - }, - }) - - for addr := range b.subConns { - if addr.Attributes != nil { - t.Errorf("in b.subConns, got address %+v with not nil attributes, want nil", addr) - } - } -} - func TestBaseBalancerReserveAttributes(t *testing.T) { var v = func(info PickerBuildInfo) { for _, sc := range info.ReadySCs { diff --git a/balancer/conn_state_evaluator.go b/balancer/conn_state_evaluator.go new file mode 100644 index 000000000000..c33413581091 --- /dev/null +++ b/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. 
+ switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/balancer/conn_state_evaluator_test.go b/balancer/conn_state_evaluator_test.go new file mode 100644 index 000000000000..d82ddf84c240 --- /dev/null +++ b/balancer/conn_state_evaluator_test.go @@ -0,0 +1,245 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import ( + "testing" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpctest" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestRecordTransition_FirstStateChange tests the first call to +// RecordTransition where the `oldState` is usually set to `Shutdown` (a state +// that the ConnectivityStateEvaluator is set to ignore). 
+func (s) TestRecordTransition_FirstStateChange(t *testing.T) { + tests := []struct { + newState connectivity.State + wantState connectivity.State + }{ + { + newState: connectivity.Idle, + wantState: connectivity.Idle, + }, + { + newState: connectivity.Connecting, + wantState: connectivity.Connecting, + }, + { + newState: connectivity.Ready, + wantState: connectivity.Ready, + }, + { + newState: connectivity.TransientFailure, + wantState: connectivity.TransientFailure, + }, + { + newState: connectivity.Shutdown, + wantState: connectivity.TransientFailure, + }, + } + for _, test := range tests { + cse := &ConnectivityStateEvaluator{} + if gotState := cse.RecordTransition(connectivity.Shutdown, test.newState); gotState != test.wantState { + t.Fatalf("RecordTransition(%v, %v) = %v, want %v", connectivity.Shutdown, test.newState, gotState, test.wantState) + } + } +} + +// TestRecordTransition_SameState tests the scenario where state transitions to +// the same state are recorded multiple times. 
+func (s) TestRecordTransition_SameState(t *testing.T) { + tests := []struct { + newState connectivity.State + wantState connectivity.State + }{ + { + newState: connectivity.Idle, + wantState: connectivity.Idle, + }, + { + newState: connectivity.Connecting, + wantState: connectivity.Connecting, + }, + { + newState: connectivity.Ready, + wantState: connectivity.Ready, + }, + { + newState: connectivity.TransientFailure, + wantState: connectivity.TransientFailure, + }, + { + newState: connectivity.Shutdown, + wantState: connectivity.TransientFailure, + }, + } + const numStateChanges = 5 + for _, test := range tests { + cse := &ConnectivityStateEvaluator{} + var prevState, gotState connectivity.State + prevState = connectivity.Shutdown + for i := 0; i < numStateChanges; i++ { + gotState = cse.RecordTransition(prevState, test.newState) + prevState = test.newState + } + if gotState != test.wantState { + t.Fatalf("RecordTransition() = %v, want %v", gotState, test.wantState) + } + } +} + +// TestRecordTransition_SingleSubConn_DifferentStates tests some common +// connectivity state change scenarios, on a single subConn. 
+func (s) TestRecordTransition_SingleSubConn_DifferentStates(t *testing.T) { + tests := []struct { + name string + states []connectivity.State + wantState connectivity.State + }{ + { + name: "regular transition to ready", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready}, + wantState: connectivity.Ready, + }, + { + name: "regular transition to transient failure", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.TransientFailure}, + wantState: connectivity.TransientFailure, + }, + { + name: "regular transition to ready", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.Idle}, + wantState: connectivity.Idle, + }, + { + name: "transition from ready to transient failure", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.TransientFailure}, + wantState: connectivity.TransientFailure, + }, + { + name: "transition from transient failure back to ready", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.TransientFailure, connectivity.Ready}, + wantState: connectivity.Ready, + }, + { + // This state transition is usually suppressed at the LB policy level, by + // not calling RecordTransition. + name: "transition from transient failure back to idle", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.TransientFailure, connectivity.Idle}, + wantState: connectivity.Idle, + }, + { + // This state transition is usually suppressed at the LB policy level, by + // not calling RecordTransition. 
+ name: "transition from transient failure back to connecting", + states: []connectivity.State{connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.TransientFailure, connectivity.Connecting}, + wantState: connectivity.Connecting, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cse := &ConnectivityStateEvaluator{} + var prevState, gotState connectivity.State + prevState = connectivity.Shutdown + for _, newState := range test.states { + gotState = cse.RecordTransition(prevState, newState) + prevState = newState + } + if gotState != test.wantState { + t.Fatalf("RecordTransition() = %v, want %v", gotState, test.wantState) + } + }) + } +} + +// TestRecordTransition_MultipleSubConns_DifferentStates tests state transitions +// among multiple subConns, and verifies that the connectivity state aggregation +// algorithm produces the expected aggregate connectivity state. +func (s) TestRecordTransition_MultipleSubConns_DifferentStates(t *testing.T) { + tests := []struct { + name string + // Each entry in this slice corresponds to the state changes happening on an + // individual subConn. 
+ subConnStates [][]connectivity.State + wantState connectivity.State + }{ + { + name: "atleast one ready", + subConnStates: [][]connectivity.State{ + {connectivity.Idle, connectivity.Connecting, connectivity.Ready}, + {connectivity.Idle}, + {connectivity.Idle, connectivity.Connecting}, + {connectivity.Idle, connectivity.Connecting, connectivity.TransientFailure}, + }, + wantState: connectivity.Ready, + }, + { + name: "atleast one connecting", + subConnStates: [][]connectivity.State{ + {connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.Connecting}, + {connectivity.Idle}, + {connectivity.Idle, connectivity.Connecting, connectivity.TransientFailure}, + }, + wantState: connectivity.Connecting, + }, + { + name: "atleast one idle", + subConnStates: [][]connectivity.State{ + {connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.Idle}, + {connectivity.Idle, connectivity.Connecting, connectivity.TransientFailure}, + }, + wantState: connectivity.Idle, + }, + { + name: "atleast one transient failure", + subConnStates: [][]connectivity.State{ + {connectivity.Idle, connectivity.Connecting, connectivity.Ready, connectivity.TransientFailure}, + {connectivity.TransientFailure}, + }, + wantState: connectivity.TransientFailure, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cse := &ConnectivityStateEvaluator{} + var prevState, gotState connectivity.State + for _, scStates := range test.subConnStates { + prevState = connectivity.Shutdown + for _, newState := range scStates { + gotState = cse.RecordTransition(prevState, newState) + prevState = newState + } + } + if gotState != test.wantState { + t.Fatalf("RecordTransition() = %v, want %v", gotState, test.wantState) + } + }) + } +} diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index c393d7ffd3b2..f070878bd993 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ 
b/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,14 +19,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -42,16 +41,13 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type LoadBalanceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to LoadBalanceRequestType: + // // *LoadBalanceRequest_InitialRequest // *LoadBalanceRequest_ClientStats LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` @@ -340,6 +336,7 @@ type LoadBalanceResponse struct { unknownFields protoimpl.UnknownFields // Types that are assignable to LoadBalanceResponseType: + // // *LoadBalanceResponse_InitialResponse // *LoadBalanceResponse_ServerList // *LoadBalanceResponse_FallbackResponse diff --git a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index d56b77cca634..00d0954b38a5 100644 --- a/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -1,4 +1,27 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines the GRPCLB LoadBalancing protocol. +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/lb/v1/load_balancer.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -14,6 +37,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" +) + // LoadBalancerClient is the client API for LoadBalancer service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -31,7 +58,7 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { } func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { - stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, opts...) if err != nil { return nil, err } diff --git a/balancer/grpclb/grpclb.go b/balancer/grpclb/grpclb.go index a43d8964119f..6d698229a342 100644 --- a/balancer/grpclb/grpclb.go +++ b/balancer/grpclb/grpclb.go @@ -19,12 +19,14 @@ // Package grpclb defines a grpclb balancer. 
// // To install grpclb balancer, import this package as: -// import _ "google.golang.org/grpc/balancer/grpclb" +// +// import _ "google.golang.org/grpc/balancer/grpclb" package grpclb import ( "context" "errors" + "fmt" "sync" "time" @@ -134,7 +136,8 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal lb := &lbBalancer{ cc: newLBCacheClientConn(cc), - target: opt.Target.Endpoint, + dialTarget: opt.Target.Endpoint(), + target: opt.Target.Endpoint(), opt: opt, fallbackTimeout: b.fallbackTimeout, doneCh: make(chan struct{}), @@ -163,9 +166,10 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal } type lbBalancer struct { - cc *lbCacheClientConn - target string - opt balancer.BuildOptions + cc *lbCacheClientConn + dialTarget string // user's dial target + target string // same as dialTarget unless overridden in service config + opt balancer.BuildOptions usePickFirst bool @@ -221,16 +225,18 @@ type lbBalancer struct { // when resolved address updates are received, and read in the goroutine // handling fallback. resolvedBackendAddrs []resolver.Address + connErr error // the last connection error } // regeneratePicker takes a snapshot of the balancer, and generates a picker from // it. The picker -// - always returns ErrTransientFailure if the balancer is in TransientFailure, -// - does two layer roundrobin pick otherwise. +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - does two layer roundrobin pick otherwise. +// // Caller must hold lb.mu. func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: balancer.ErrTransientFailure} + lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)} return } @@ -286,14 +292,14 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // fallback and grpclb). 
lb.scState contains states for all SubConns, including // those in cache (SubConns are cached for 10 seconds after remove). // -// The aggregated state is: -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; -// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, -// they start to connect immediately. But there's a race between the overall -// state is reported, and when the new SubConn state arrives. And SubConns -// never go back to IDLE. -// - Else the aggregated state is TransientFailure. +// The aggregated state is: +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; +// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, +// they start to connect immediately. But there's a race between the overall +// state is reported, and when the new SubConn state arrives. And SubConns +// never go back to IDLE. +// - Else the aggregated state is TransientFailure. func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { var numConnecting uint64 @@ -336,6 +342,8 @@ func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubCo // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(lb.scStates, sc) + case connectivity.TransientFailure: + lb.connErr = scs.ConnectionError } // Force regenerate picker if // - this sc became ready from not-ready @@ -394,6 +402,30 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { lb.mu.Lock() defer lb.mu.Unlock() + // grpclb uses the user's dial target to populate the `Name` field of the + // `InitialLoadBalanceRequest` message sent to the remote balancer. 
But when + // grpclb is used a child policy in the context of RLS, we want the `Name` + // field to be populated with the value received from the RLS server. To + // support this use case, an optional "target_name" field has been added to + // the grpclb LB policy's config. If specified, it overrides the name of + // the target to be sent to the remote balancer; if not, the target to be + // sent to the balancer will continue to be obtained from the target URI + // passed to the gRPC client channel. Whenever that target to be sent to the + // balancer is updated, we need to restart the stream to the balancer as + // this target is sent in the first message on the stream. + if gc != nil { + target := lb.dialTarget + if gc.ServiceName != "" { + target = gc.ServiceName + } + if target != lb.target { + lb.target = target + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.cancelRemoteBalancerCall() + } + } + } + newUsePickFirst := childIsPickFirst(gc) if lb.usePickFirst == newUsePickFirst { return @@ -484,3 +516,5 @@ func (lb *lbBalancer) Close() { } lb.cc.close() } + +func (lb *lbBalancer) ExitIdle() {} diff --git a/balancer/grpclb/grpclb_config.go b/balancer/grpclb/grpclb_config.go index aac3719631b4..8942c31310af 100644 --- a/balancer/grpclb/grpclb_config.go +++ b/balancer/grpclb/grpclb_config.go @@ -34,6 +34,7 @@ const ( type grpclbServiceConfig struct { serviceconfig.LoadBalancingConfig ChildPolicy *[]map[string]json.RawMessage + ServiceName string } func (b *lbBuilder) ParseConfig(lbConfig json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { diff --git a/balancer/grpclb/grpclb_config_test.go b/balancer/grpclb/grpclb_config_test.go index 5a45de90494b..040908728793 100644 --- a/balancer/grpclb/grpclb_config_test.go +++ b/balancer/grpclb/grpclb_config_test.go @@ -20,52 +20,68 @@ package grpclb import ( "encoding/json" - "errors" - "fmt" - "reflect" - "strings" "testing" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc/serviceconfig" ) func (s) 
TestParse(t *testing.T) { tests := []struct { name string - s string + sc string want serviceconfig.LoadBalancingConfig - wantErr error + wantErr bool }{ { name: "empty", - s: "", + sc: "", want: nil, - wantErr: errors.New("unexpected end of JSON input"), + wantErr: true, }, { name: "success1", - s: `{"childPolicy":[{"pick_first":{}}]}`, + sc: ` +{ + "childPolicy": [ + {"pick_first":{}} + ], + "serviceName": "foo-service" +}`, want: &grpclbServiceConfig{ ChildPolicy: &[]map[string]json.RawMessage{ {"pick_first": json.RawMessage("{}")}, }, + ServiceName: "foo-service", }, }, { name: "success2", - s: `{"childPolicy":[{"round_robin":{}},{"pick_first":{}}]}`, + sc: ` +{ + "childPolicy": [ + {"round_robin":{}}, + {"pick_first":{}} + ], + "serviceName": "foo-service" +}`, want: &grpclbServiceConfig{ ChildPolicy: &[]map[string]json.RawMessage{ {"round_robin": json.RawMessage("{}")}, {"pick_first": json.RawMessage("{}")}, }, + ServiceName: "foo-service", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got, err := (&lbBuilder{}).ParseConfig(json.RawMessage(tt.s)); !reflect.DeepEqual(got, tt.want) || !strings.Contains(fmt.Sprint(err), fmt.Sprint(tt.wantErr)) { - t.Errorf("parseFullServiceConfig() = %+v, %+v, want %+v, ", got, err, tt.want, tt.wantErr) + got, err := (&lbBuilder{}).ParseConfig(json.RawMessage(tt.sc)) + if (err != nil) != (tt.wantErr) { + t.Fatalf("ParseConfig(%q) returned error: %v, wantErr: %v", tt.sc, err, tt.wantErr) + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Fatalf("ParseConfig(%q) returned unexpected difference (-want +got):\n%s", tt.sc, diff) } }) } diff --git a/balancer/grpclb/grpclb_remote_balancer.go b/balancer/grpclb/grpclb_remote_balancer.go index 5ac8d86bd570..e56006d7131a 100644 --- a/balancer/grpclb/grpclb_remote_balancer.go +++ b/balancer/grpclb/grpclb_remote_balancer.go @@ -33,8 +33,8 @@ import ( "google.golang.org/grpc/balancer" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" 
"google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/channelz" imetadata "google.golang.org/grpc/internal/metadata" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -135,11 +135,19 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback } if lb.usePickFirst { - var sc balancer.SubConn - for _, sc = range lb.subConns { + var ( + scKey resolver.Address + sc balancer.SubConn + ) + for scKey, sc = range lb.subConns { break } if sc != nil { + if len(backendAddrs) == 0 { + lb.cc.cc.RemoveSubConn(sc) + delete(lb.subConns, scKey) + return + } lb.cc.cc.UpdateAddresses(sc, backendAddrs) sc.Connect() return @@ -206,6 +214,9 @@ type remoteBalancerCCWrapper struct { backoff backoff.Strategy done chan struct{} + streamMu sync.Mutex + streamCancel func() + // waitgroup to wait for all goroutines to exit. wg sync.WaitGroup } @@ -217,7 +228,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { } else if bundle := lb.grpclbClientConnCreds; bundle != nil { dopts = append(dopts, grpc.WithCredentialsBundle(bundle)) } else { - dopts = append(dopts, grpc.WithInsecure()) + dopts = append(dopts, grpc.WithTransportCredentials(insecure.NewCredentials())) } if lb.opt.Dialer != nil { dopts = append(dopts, grpc.WithContextDialer(lb.opt.Dialer)) @@ -228,9 +239,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // Explicitly set pickfirst as the balancer. dopts = append(dopts, grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"pick_first"}`)) dopts = append(dopts, grpc.WithResolvers(lb.manualResolver)) - if channelz.IsOn() { - dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) - } + dopts = append(dopts, grpc.WithChannelzParentID(lb.opt.ChannelzParentID)) // Enable Keepalive for grpclb client. 
dopts = append(dopts, grpc.WithKeepaliveParams(keepalive.ClientParameters{ @@ -319,13 +328,11 @@ func (ccw *remoteBalancerCCWrapper) sendLoadReport(s *balanceLoadClientStream, i } } -func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) { +func (ccw *remoteBalancerCCWrapper) callRemoteBalancer(ctx context.Context) (backoff bool, _ error) { lbClient := &loadBalancerClient{cc: ccw.cc} - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) if err != nil { - return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) + return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer: %v", err) } ccw.lb.mu.Lock() ccw.lb.remoteBalancerConnected = true @@ -362,11 +369,43 @@ func (ccw *remoteBalancerCCWrapper) callRemoteBalancer() (backoff bool, _ error) return false, ccw.readServerList(stream) } +// cancelRemoteBalancerCall cancels the context used by the stream to the remote +// balancer. watchRemoteBalancer() takes care of restarting this call after the +// stream fails. +func (ccw *remoteBalancerCCWrapper) cancelRemoteBalancerCall() { + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + ccw.streamCancel() + ccw.streamCancel = nil + } + ccw.streamMu.Unlock() +} + func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { - defer ccw.wg.Done() + defer func() { + ccw.wg.Done() + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + // This is to make sure that we don't leak the context when we are + // directly returning from inside of the below `for` loop. 
+ ccw.streamCancel() + ccw.streamCancel = nil + } + ccw.streamMu.Unlock() + }() + var retryCount int + var ctx context.Context for { - doBackoff, err := ccw.callRemoteBalancer() + ccw.streamMu.Lock() + if ccw.streamCancel != nil { + ccw.streamCancel() + ccw.streamCancel = nil + } + ctx, ccw.streamCancel = context.WithCancel(context.Background()) + ccw.streamMu.Unlock() + + doBackoff, err := ccw.callRemoteBalancer(ctx) select { case <-ccw.done: return diff --git a/balancer/grpclb/grpclb_test.go b/balancer/grpclb/grpclb_test.go index 9cbb338c2415..9dbfd3466401 100644 --- a/balancer/grpclb/grpclb_test.go +++ b/balancer/grpclb/grpclb_test.go @@ -31,22 +31,31 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc" "google.golang.org/grpc/balancer" grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/pickfirst" + "google.golang.org/grpc/internal/testutils/roundrobin" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" durationpb "github.com/golang/protobuf/ptypes/duration" lbgrpc "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) var ( @@ -60,6 +69,13 @@ var ( fakeName = "fake.Name" ) +const ( + defaultTestTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond + testUserAgent = "test-user-agent" + grpclbConfig = 
`{"loadBalancingConfig": [{"grpclb": {}}]}` +) + type s struct { grpctest.Tester } @@ -136,18 +152,6 @@ func (s *rpcStats) merge(cs *lbpb.ClientStats) { s.mu.Unlock() } -func mapsEqual(a, b map[string]int64) bool { - if len(a) != len(b) { - return false - } - for k, v1 := range a { - if v2, ok := b[k]; !ok || v1 != v2 { - return false - } - } - return true -} - func atomicEqual(a, b *int64) bool { return atomic.LoadInt64(a) == atomic.LoadInt64(b) } @@ -172,7 +176,7 @@ func (s *rpcStats) equal(o *rpcStats) bool { defer s.mu.Unlock() o.mu.Lock() defer o.mu.Unlock() - return mapsEqual(s.numCallsDropped, o.numCallsDropped) + return cmp.Equal(s.numCallsDropped, o.numCallsDropped, cmpopts.EquateEmpty()) } func (s *rpcStats) String() string { @@ -188,24 +192,28 @@ func (s *rpcStats) String() string { type remoteBalancer struct { lbgrpc.UnimplementedLoadBalancerServer - sls chan *lbpb.ServerList - statsDura time.Duration - done chan struct{} - stats *rpcStats - statsChan chan *lbpb.ClientStats - fbChan chan struct{} - - customUserAgent string + sls chan *lbpb.ServerList + statsDura time.Duration + done chan struct{} + stats *rpcStats + statsChan chan *lbpb.ClientStats + fbChan chan struct{} + balanceLoadCh chan struct{} // notify successful invocation of BalanceLoad + + wantUserAgent string // expected user-agent in metadata of BalancerLoad + wantServerName string // expected server name in InitialLoadBalanceRequest } -func newRemoteBalancer(customUserAgent string, statsChan chan *lbpb.ClientStats) *remoteBalancer { +func newRemoteBalancer(wantUserAgent, wantServerName string, statsChan chan *lbpb.ClientStats) *remoteBalancer { return &remoteBalancer{ - sls: make(chan *lbpb.ServerList, 1), - done: make(chan struct{}), - stats: newRPCStats(), - statsChan: statsChan, - fbChan: make(chan struct{}), - customUserAgent: customUserAgent, + sls: make(chan *lbpb.ServerList, 1), + done: make(chan struct{}), + stats: newRPCStats(), + statsChan: statsChan, + fbChan: make(chan 
struct{}), + balanceLoadCh: make(chan struct{}, 1), + wantUserAgent: wantUserAgent, + wantServerName: wantServerName, } } @@ -218,15 +226,18 @@ func (b *remoteBalancer) fallbackNow() { b.fbChan <- struct{}{} } +func (b *remoteBalancer) updateServerName(name string) { + b.wantServerName = name +} + func (b *remoteBalancer) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServer) error { md, ok := metadata.FromIncomingContext(stream.Context()) if !ok { return status.Error(codes.Internal, "failed to receive metadata") } - if b.customUserAgent != "" { - ua := md["user-agent"] - if len(ua) == 0 || !strings.HasPrefix(ua[0], b.customUserAgent) { - return status.Errorf(codes.InvalidArgument, "received unexpected user-agent: %v, want prefix %q", ua, b.customUserAgent) + if b.wantUserAgent != "" { + if ua := md["user-agent"]; len(ua) == 0 || !strings.HasPrefix(ua[0], b.wantUserAgent) { + return status.Errorf(codes.InvalidArgument, "received unexpected user-agent: %v, want prefix %q", ua, b.wantUserAgent) } } @@ -235,9 +246,10 @@ func (b *remoteBalancer) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServe return err } initReq := req.GetInitialRequest() - if initReq.Name != beServerName { - return status.Errorf(codes.InvalidArgument, "invalid service name: %v", initReq.Name) + if initReq.Name != b.wantServerName { + return status.Errorf(codes.InvalidArgument, "invalid service name: %q, want: %q", initReq.Name, b.wantServerName) } + b.balanceLoadCh <- struct{}{} resp := &lbpb.LoadBalanceResponse{ LoadBalanceResponseType: &lbpb.LoadBalanceResponse_InitialResponse{ InitialResponse: &lbpb.InitialLoadBalanceResponse{ @@ -253,11 +265,8 @@ func (b *remoteBalancer) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServe } go func() { for { - var ( - req *lbpb.LoadBalanceRequest - err error - ) - if req, err = stream.Recv(); err != nil { + req, err := stream.Recv() + if err != nil { return } b.stats.merge(req.GetClientStats()) @@ -290,7 +299,7 @@ func (b *remoteBalancer) 
BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServe } type testServer struct { - testpb.UnimplementedTestServiceServer + testgrpc.UnimplementedTestServiceServer addr string fallback bool @@ -310,21 +319,22 @@ func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.E return &testpb.Empty{}, nil } -func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { return nil } -func startBackends(sn string, fallback bool, lis ...net.Listener) (servers []*grpc.Server) { +func startBackends(t *testing.T, sn string, fallback bool, lis ...net.Listener) (servers []*grpc.Server) { for _, l := range lis { creds := &serverNameCheckCreds{ sn: sn, } s := grpc.NewServer(grpc.Creds(creds)) - testpb.RegisterTestServiceServer(s, &testServer{addr: l.Addr().String(), fallback: fallback}) + testgrpc.RegisterTestServiceServer(s, &testServer{addr: l.Addr().String(), fallback: fallback}) servers = append(servers, s) go func(s *grpc.Server, l net.Listener) { s.Serve(l) }(s, l) + t.Logf("Started backend server listening on %s", l.Addr().String()) } return } @@ -347,7 +357,7 @@ type testServers struct { beListeners []net.Listener } -func newLoadBalancer(numberOfBackends int, customUserAgent string, statsChan chan *lbpb.ClientStats) (tss *testServers, cleanup func(), err error) { +func startBackendsAndRemoteLoadBalancer(t *testing.T, numberOfBackends int, customUserAgent string, statsChan chan *lbpb.ClientStats) (tss *testServers, cleanup func(), err error) { var ( beListeners []net.Listener ls *remoteBalancer @@ -356,7 +366,6 @@ func newLoadBalancer(numberOfBackends int, customUserAgent string, statsChan cha bePorts []int ) for i := 0; i < numberOfBackends; i++ { - // Start a backend. 
beLis, e := net.Listen("tcp", "localhost:0") if e != nil { err = fmt.Errorf("failed to listen %v", err) @@ -365,26 +374,26 @@ func newLoadBalancer(numberOfBackends int, customUserAgent string, statsChan cha beIPs = append(beIPs, beLis.Addr().(*net.TCPAddr).IP) bePorts = append(bePorts, beLis.Addr().(*net.TCPAddr).Port) - beListeners = append(beListeners, newRestartableListener(beLis)) + beListeners = append(beListeners, testutils.NewRestartableListener(beLis)) } - backends := startBackends(beServerName, false, beListeners...) + backends := startBackends(t, beServerName, false, beListeners...) - // Start a load balancer. lbLis, err := net.Listen("tcp", "localhost:0") if err != nil { err = fmt.Errorf("failed to create the listener for the load balancer %v", err) return } - lbLis = newRestartableListener(lbLis) + lbLis = testutils.NewRestartableListener(lbLis) lbCreds := &serverNameCheckCreds{ sn: lbServerName, } lb = grpc.NewServer(grpc.Creds(lbCreds)) - ls = newRemoteBalancer(customUserAgent, statsChan) + ls = newRemoteBalancer(customUserAgent, beServerName, statsChan) lbgrpc.RegisterLoadBalancerServer(lb, ls) go func() { lb.Serve(lbLis) }() + t.Logf("Started remote load balancer server listening on %s", lbLis.Addr().String()) tss = &testServers{ lbAddr: net.JoinHostPort(fakeName, strconv.Itoa(lbLis.Addr().(*net.TCPAddr).Port)), @@ -407,61 +416,69 @@ func newLoadBalancer(numberOfBackends int, customUserAgent string, statsChan cha return } -var grpclbConfig = `{"loadBalancingConfig": [{"grpclb": {}}]}` - -func (s) TestGRPCLB(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - const testUserAgent = "test-user-agent" - tss, cleanup, err := newLoadBalancer(1, testUserAgent, nil) +// TestGRPCLB_Basic tests the basic case of a channel being configured with +// grpclb as the load balancing policy. 
+func (s) TestGRPCLB_Basic(t *testing.T) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, testUserAgent, nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, + // Push the test backend address to the remote balancer. + tss.ls.sls <- &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, } - var bes []*lbpb.Server - bes = append(bes, be) - sl := &lbpb.ServerList{ - Servers: bes, + + // Configure the manual resolver with an initial state containing a service + // config with grpclb as the load balancing policy and the remote balancer + // address specified via attributes. + r := manual.NewBuilderWithScheme("whatever") + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, } - tss.ls.sls <- sl - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer), - grpc.WithUserAgent(testUserAgent)) + rs := grpclbstate.Set(resolver.State{ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig)}, s) + r.InitialState(rs) + + // Connect to the test backend. + dopts := []grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), + grpc.WithUserAgent(testUserAgent), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) 
if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) - rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, - &grpclbstate.State{BalancerAddresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.Backend, - ServerName: lbServerName, - }}}) - r.UpdateState(rs) - - ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + // Make one successful RPC. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() + testC := testgrpc.NewTestServiceClient(cc) if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } } -// The remote balancer sends response with duplicates to grpclb client. -func (s) TestGRPCLBWeighted(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - tss, cleanup, err := newLoadBalancer(2, "", nil) +// TestGRPCLB_Weighted tests weighted roundrobin. The remote balancer is +// configured to send a response with duplicate backend addresses (to simulate +// weights) to the grpclb client. The test verifies that RPCs are weighted +// roundrobin-ed across these backends. +func (s) TestGRPCLB_Weighted(t *testing.T) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 2, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -476,57 +493,67 @@ func (s) TestGRPCLBWeighted(t *testing.T) { Port: int32(tss.bePorts[1]), LoadBalanceToken: lbToken, }} - portsToIndex := make(map[int]int) - for i := range beServers { - portsToIndex[tss.bePorts[i]] = i + + // Configure the manual resolver with an initial state containing a service + // config with grpclb as the load balancing policy and the remote balancer + // address specified via attributes. 
+ r := manual.NewBuilderWithScheme("whatever") + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, } + rs := grpclbstate.Set(resolver.State{ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig)}, s) + r.InitialState(rs) - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + // Connect to test backends. + dopts := []grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }}}) - sequences := []string{"00101", "00011"} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Sequence represents the sequence of backends to be returned from the + // remote load balancer. + sequences := [][]int{ + {0, 0, 1, 0, 1}, + {0, 0, 0, 1, 1}, + } for _, seq := range sequences { - var ( - bes []*lbpb.Server - p peer.Peer - result string - ) + // Push the configured sequence of backend to the remote balancer, and + // compute the expected addresses to which RPCs should be routed. 
+ var backends []*lbpb.Server + var wantAddrs []resolver.Address for _, s := range seq { - bes = append(bes, beServers[s-'0']) + backends = append(backends, beServers[s]) + wantAddrs = append(wantAddrs, resolver.Address{Addr: tss.beListeners[s].Addr().String()}) } - tss.ls.sls <- &lbpb.ServerList{Servers: bes} + tss.ls.sls <- &lbpb.ServerList{Servers: backends} - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) - } - // The generated result will be in format of "0010100101". - if !strings.Contains(result, strings.Repeat(seq, 2)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + testC := testgrpc.NewTestServiceClient(cc) + if err := roundrobin.CheckWeightedRoundRobinRPCs(ctx, testC, wantAddrs); err != nil { + t.Fatal(err) } } } -func (s) TestDropRequest(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - tss, cleanup, err := newLoadBalancer(2, "", nil) +// TestGRPCLB_DropRequest tests grpclb support for dropping requests based on +// configuration received from the remote balancer. +// +// TODO: Rewrite this test to verify drop behavior using the +// ClientStats.CallsFinishedWithDrop field instead. 
+func (s) TestGRPCLB_DropRequest(t *testing.T) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 2, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -546,22 +573,34 @@ func (s) TestDropRequest(t *testing.T) { Drop: true, }}, } - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + + // Configure the manual resolver with an initial state containing a service + // config with grpclb as the load balancing policy and the remote balancer + // address specified via attributes. + r := manual.NewBuilderWithScheme("whatever") + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, + } + rs := grpclbstate.Set(resolver.State{ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig)}, s) + r.InitialState(rs) + + // Connect to test backends. + dopts := []grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }}}) + testC := testgrpc.NewTestServiceClient(cc) var ( i int @@ -573,6 +612,8 @@ func (s) TestDropRequest(t *testing.T) { sleepEachLoop = time.Millisecond loopCount = int(time.Second / sleepEachLoop) ) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() // Make a non-fail-fast RPC and wait for it to succeed. 
for i = 0; i < loopCount; i++ { if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err == nil { @@ -653,7 +694,7 @@ func (s) TestDropRequest(t *testing.T) { for i := 0; i < 3; i++ { var p peer.Peer if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if want := tss.bePorts[1]; p.Addr.(*net.TCPAddr).Port != want { t.Errorf("got peer: %v, want peer port: %v", p.Addr, want) @@ -664,7 +705,7 @@ func (s) TestDropRequest(t *testing.T) { } if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Errorf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } if want := tss.bePorts[1]; p.Addr.(*net.TCPAddr).Port != want { t.Errorf("got peer: %v, want peer port: %v", p.Addr, want) @@ -672,436 +713,365 @@ func (s) TestDropRequest(t *testing.T) { } } -// When the balancer in use disconnects, grpclb should connect to the next address from resolved balancer address list. -func (s) TestBalancerDisconnects(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - +// TestGRPCLB_BalancerDisconnects tests the case where the remote balancer in +// use disconnects. The test verifies that grpclb connects to the next remote +// balancer address specified in attributes, and RPCs get routed to the backends +// returned by the new balancer. 
+func (s) TestGRPCLB_BalancerDisconnects(t *testing.T) { var ( tests []*testServers lbs []*grpc.Server ) for i := 0; i < 2; i++ { - tss, cleanup, err := newLoadBalancer(1, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, - } - var bes []*lbpb.Server - bes = append(bes, be) - sl := &lbpb.ServerList{ - Servers: bes, + tss.ls.sls <- &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, } - tss.ls.sls <- sl tests = append(tests, tss) lbs = append(lbs, tss.lb) } - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + // Configure the manual resolver with an initial state containing a service + // config with grpclb as the load balancing policy and the remote balancer + // addresses specified via attributes. + r := manual.NewBuilderWithScheme("whatever") + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tests[0].lbAddr, + ServerName: lbServerName, + }, + { + Addr: tests[1].lbAddr, + ServerName: lbServerName, + }, + }, + } + rs := grpclbstate.Set(resolver.State{ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig)}, s) + r.InitialState(rs) + + dopts := []grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) 
if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: tests[0].lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: tests[1].lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }}}) + testC := testgrpc.NewTestServiceClient(cc) - var p peer.Peer - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port != tests[0].bePorts[0] { - t.Fatalf("got peer: %v, want peer port: %v", p.Addr, tests[0].bePorts[0]) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tests[0].beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } - lbs[0].Stop() // Stop balancer[0], balancer[1] should be used by grpclb. // Check peer address to see if that happened. - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tests[1].bePorts[0] { - return - } - time.Sleep(time.Millisecond) + lbs[0].Stop() + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tests[1].beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } - t.Fatalf("No RPC sent to second backend after 1 second") } -func (s) TestFallback(t *testing.T) { +// TestGRPCLB_Fallback tests the following fallback scenarios: +// - when the remote balancer address specified in attributes is invalid, the +// test verifies that RPCs are routed to the fallback backend. 
+// - when the remote balancer address specified in attributes is changed to a +// valid one, the test verifies that RPCs are routed to the backend returned +// by the remote balancer. +// - when the configured remote balancer goes down, the test verifies that +// RPCs are routed to the fallback backend. +func (s) TestGRPCLB_Fallback(t *testing.T) { balancer.Register(newLBBuilderWithFallbackTimeout(100 * time.Millisecond)) defer balancer.Register(newLBBuilder()) - r := manual.NewBuilderWithScheme("whatever") - - tss, cleanup, err := newLoadBalancer(1, "", nil) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() + sl := &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, + } + // Push the backend address to the remote balancer. + tss.ls.sls <- sl - // Start a standalone backend. + // Start a standalone backend for fallback. 
beLis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen %v", err) } defer beLis.Close() - standaloneBEs := startBackends(beServerName, true, beLis) + standaloneBEs := startBackends(t, beServerName, true, beLis) defer stopBackends(standaloneBEs) - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, - } - var bes []*lbpb.Server - bes = append(bes, be) - sl := &lbpb.ServerList{ - Servers: bes, + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), } - tss.ls.sls <- sl - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: "invalid.address", - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}}) - - var p peer.Peer - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) - } - if p.Addr.String() != beLis.Addr().String() { - t.Fatalf("got peer: %v, want peer: %v", p.Addr, beLis.Addr()) + // Push an update to the resolver with fallback backend address stored in + // the `Addresses` field and an invalid remote balancer address stored in + // attributes, which will cause fallback behavior to be invoked. 
+ rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: "invalid.address", ServerName: lbServerName}}}) + r.UpdateState(rs) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}}) + // Make an RPC and verify that it got routed to the fallback backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: beLis.Addr().String()}}); err != nil { + t.Fatal(err) + } - var backendUsed bool - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { - backendUsed = true - break - } - time.Sleep(time.Millisecond) + // Push another update to the resolver, this time with a valid balancer + // address in the attributes field. + rs = resolver.State{ + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, } - if !backendUsed { - t.Fatalf("No RPC sent to backend behind remote balancer after 1 second") + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) + r.UpdateState(rs) + select { + case <-ctx.Done(): + t.Fatalf("timeout when waiting for BalanceLoad RPC to be called on the remote balancer") + case <-tss.ls.balanceLoadCh: } - // Close backend and remote balancer connections, should use fallback. 
- tss.beListeners[0].(*restartableListener).stopPreviousConns() - tss.lbListener.(*restartableListener).stopPreviousConns() - - var fallbackUsed bool - for i := 0; i < 2000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - // Because we are hard-closing the connection, above, it's possible - // for the first RPC attempt to be sent on the old connection, - // which will lead to an Unavailable error when it is closed. - // Ignore unavailable errors. - if status.Code(err) != codes.Unavailable { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - } - if p.Addr.String() == beLis.Addr().String() { - fallbackUsed = true - break - } - time.Sleep(time.Millisecond) + // Wait for RPCs to get routed to the backend behind the remote balancer. + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tss.beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } - if !fallbackUsed { - t.Fatalf("No RPC sent to fallback after 2 seconds") + + // Close backend and remote balancer connections, should use fallback. + tss.beListeners[0].(*testutils.RestartableListener).Stop() + tss.lbListener.(*testutils.RestartableListener).Stop() + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: beLis.Addr().String()}}); err != nil { + t.Fatal(err) } - // Restart backend and remote balancer, should not use backends. - tss.beListeners[0].(*restartableListener).restart() - tss.lbListener.(*restartableListener).restart() + // Restart backend and remote balancer, should not use fallback backend. 
+ tss.beListeners[0].(*testutils.RestartableListener).Restart() + tss.lbListener.(*testutils.RestartableListener).Restart() tss.ls.sls <- sl - - var backendUsed2 bool - for i := 0; i < 2000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { - backendUsed2 = true - break - } - time.Sleep(time.Millisecond) - } - if !backendUsed2 { - t.Fatalf("No RPC sent to backend behind remote balancer after 2 seconds") + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tss.beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } } -func (s) TestExplicitFallback(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - tss, cleanup, err := newLoadBalancer(1, "", nil) +// TestGRPCLB_ExplicitFallback tests the case where the remote balancer sends an +// explicit fallback signal to the grpclb client, and the test verifies that +// RPCs are routed to the fallback backend. +func (s) TestGRPCLB_ExplicitFallback(t *testing.T) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() + sl := &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, + } + // Push the backend address to the remote balancer. + tss.ls.sls <- sl - // Start a standalone backend. + // Start a standalone backend for fallback. 
beLis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen %v", err) } defer beLis.Close() - standaloneBEs := startBackends(beServerName, true, beLis) + standaloneBEs := startBackends(t, beServerName, true, beLis) defer stopBackends(standaloneBEs) - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, + // Configure the manual resolver with an initial state containing a service + // config with grpclb as the load balancing policy and the address of the + // fallback backend. The remote balancer address is specified via + // attributes. + r := manual.NewBuilderWithScheme("whatever") + rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(grpclbConfig), } - var bes []*lbpb.Server - bes = append(bes, be) - sl := &lbpb.ServerList{ - Servers: bes, + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) + r.InitialState(rs) + + dopts := []grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), } - tss.ls.sls <- sl - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) 
if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}}) - - var p peer.Peer - var backendUsed bool - for i := 0; i < 2000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { - backendUsed = true - break - } - time.Sleep(time.Millisecond) - } - if !backendUsed { - t.Fatalf("No RPC sent to backend behind remote balancer after 2 seconds") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tss.beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } // Send fallback signal from remote balancer; should use fallback. tss.ls.fallbackNow() - - var fallbackUsed bool - for i := 0; i < 2000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.String() == beLis.Addr().String() { - fallbackUsed = true - break - } - time.Sleep(time.Millisecond) - } - if !fallbackUsed { - t.Fatalf("No RPC sent to fallback after 2 seconds") + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: beLis.Addr().String()}}); err != nil { + t.Fatal(err) } // Send another server list; should use backends again. 
tss.ls.sls <- sl - - backendUsed = false - for i := 0; i < 2000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { - backendUsed = true - break - } - time.Sleep(time.Millisecond) - } - if !backendUsed { - t.Fatalf("No RPC sent to backend behind remote balancer after 2 seconds") + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tss.beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } } -func (s) TestFallBackWithNoServerAddress(t *testing.T) { - resolveNowCh := make(chan struct{}, 1) +// TestGRPCLB_FallBackWithNoServerAddress tests the fallback case where no +// backend addresses are returned by the remote balancer. +func (s) TestGRPCLB_FallBackWithNoServerAddress(t *testing.T) { + resolveNowCh := testutils.NewChannel() r := manual.NewBuilderWithScheme("whatever") r.ResolveNowCallback = func(resolver.ResolveNowOptions) { - select { - case <-resolveNowCh: - default: + ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if err := resolveNowCh.SendContext(ctx, nil); err != nil { + t.Error("timeout when attempting to send on resolverNowCh") } - resolveNowCh <- struct{}{} } - tss, cleanup, err := newLoadBalancer(1, "", nil) + // Start a remote balancer and a backend. Don't push the backend address to + // the remote balancer yet. + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } defer cleanup() + sl := &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, + } - // Start a standalone backend. + // Start a standalone backend for fallback. 
beLis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen %v", err) } defer beLis.Close() - standaloneBEs := startBackends(beServerName, true, beLis) + standaloneBEs := startBackends(t, beServerName, true, beLis) defer stopBackends(standaloneBEs) - be := &lbpb.Server{ - IpAddress: tss.beIPs[0], - Port: int32(tss.bePorts[0]), - LoadBalanceToken: lbToken, + dopts := []grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), } - var bes []*lbpb.Server - bes = append(bes, be) - sl := &lbpb.ServerList{ - Servers: bes, - } - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) - - // Select grpclb with service config. - const pfc = `{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"round_robin":{}}]}}]}` - scpr := r.CC.ParseServiceConfig(pfc) - if scpr.Err != nil { - t.Fatalf("Error parsing config %q: %v", pfc, scpr.Err) - } + testC := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() for i := 0; i < 2; i++ { - // Send an update with only backend address. grpclb should enter fallback - // and use the fallback backend. + // Send an update with only backend address. grpclb should enter + // fallback and use the fallback backend. 
r.UpdateState(resolver.State{ - Addresses: []resolver.Address{{ - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}, - ServiceConfig: scpr, + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), }) - select { - case <-resolveNowCh: - t.Errorf("unexpected resolveNow when grpclb gets no balancer address 1111, %d", i) - case <-time.After(time.Second): + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := resolveNowCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("unexpected resolveNow when grpclb gets no balancer address 1111, %d", i) } var p peer.Peer - rpcCtx, rpcCancel := context.WithTimeout(context.Background(), time.Second) - defer rpcCancel() - if _, err := testC.EmptyCall(rpcCtx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) } if p.Addr.String() != beLis.Addr().String() { t.Fatalf("got peer: %v, want peer: %v", p.Addr, beLis.Addr()) } - select { - case <-resolveNowCh: + sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := resolveNowCh.Receive(sCtx); err != context.DeadlineExceeded { t.Errorf("unexpected resolveNow when grpclb gets no balancer address 2222, %d", i) - case <-time.After(time.Second): } tss.ls.sls <- sl // Send an update with balancer address. The backends behind grpclb should // be used. 
- r.UpdateState(resolver.State{ - Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }, { - Addr: beLis.Addr().String(), - Type: resolver.Backend, - }}, - ServiceConfig: scpr, - }) + rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), + } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) + r.UpdateState(rs) - var backendUsed bool - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - if p.Addr.(*net.TCPAddr).Port == tss.bePorts[0] { - backendUsed = true - break - } - time.Sleep(time.Millisecond) + select { + case <-ctx.Done(): + t.Fatalf("timeout when waiting for BalanceLoad RPC to be called on the remote balancer") + case <-tss.ls.balanceLoadCh: } - if !backendUsed { - t.Fatalf("No RPC sent to backend behind remote balancer after 1 second") + + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, []resolver.Address{{Addr: tss.beListeners[0].Addr().String()}}); err != nil { + t.Fatal(err) } } } -func (s) TestGRPCLBPickFirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - tss, cleanup, err := newLoadBalancer(3, "", nil) +// TestGRPCLB_PickFirst configures grpclb with pick_first as the child policy. +// The test changes the list of backend addresses returned by the remote +// balancer and verifies that RPCs are sent to the first address returned. 
+func (s) TestGRPCLB_PickFirst(t *testing.T) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 3, "", nil) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -1120,115 +1090,289 @@ func (s) TestGRPCLBPickFirst(t *testing.T) { Port: int32(tss.bePorts[2]), LoadBalanceToken: lbToken, }} - portsToIndex := make(map[int]int) - for i := range beServers { - portsToIndex[tss.bePorts[i]] = i + beServerAddrs := []resolver.Address{} + for _, lis := range tss.beListeners { + beServerAddrs = append(beServerAddrs, resolver.Address{Addr: lis.Addr().String()}) } - creds := serverNameCheckCreds{} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, grpc.WithResolvers(r), - grpc.WithTransportCredentials(&creds), grpc.WithContextDialer(fakeNameDialer)) + // Connect to the test backends. + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, dopts...) if err != nil { t.Fatalf("Failed to dial to the backend %v", err) } defer cc.Close() - testC := testpb.NewTestServiceClient(cc) - var ( - p peer.Peer - result string - ) + // Push a service config with grpclb as the load balancing policy and + // configure pick_first as its child policy. + rs := resolver.State{ServiceConfig: r.CC.ParseServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)} + + // Push a resolver update with the remote balancer address specified via + // attributes. + r.UpdateState(grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}})) + + // Push all three backend addresses to the remote balancer, and verify that + // RPCs are routed to the first backend. 
tss.ls.sls <- &lbpb.ServerList{Servers: beServers[0:3]} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, beServerAddrs[0]); err != nil { + t.Fatal(err) + } - // Start with sub policy pick_first. - const pfc = `{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}` - scpr := r.CC.ParseServiceConfig(pfc) - if scpr.Err != nil { - t.Fatalf("Error parsing config %q: %v", pfc, scpr.Err) + // Update the address list with the remote balancer and verify pick_first + // behavior based on the new backends. + tss.ls.sls <- &lbpb.ServerList{Servers: beServers[2:]} + if err := pickfirst.CheckRPCsToBackend(ctx, cc, beServerAddrs[2]); err != nil { + t.Fatal(err) } - r.UpdateState(resolver.State{ - Addresses: []resolver.Address{{ - Addr: tss.lbAddr, - Type: resolver.GRPCLB, - ServerName: lbServerName, - }}, - ServiceConfig: scpr, - }) + // Update the address list with the remote balancer and verify pick_first + // behavior based on the new backends. Since the currently connected backend + // is in the new list (even though it is not the first one on the list), + // pick_first will continue to use it. + tss.ls.sls <- &lbpb.ServerList{Servers: beServers[1:]} + if err := pickfirst.CheckRPCsToBackend(ctx, cc, beServerAddrs[2]); err != nil { + t.Fatal(err) + } - result = "" - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) + // Switch child policy to roundrobin. 
+ s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, } - if seq := "00000"; !strings.Contains(result, strings.Repeat(seq, 100)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + rs = grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, s) + r.UpdateState(rs) + testC := testgrpc.NewTestServiceClient(cc) + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, beServerAddrs[1:]); err != nil { + t.Fatal(err) + } - tss.ls.sls <- &lbpb.ServerList{Servers: beServers[2:]} - result = "" - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) + tss.ls.sls <- &lbpb.ServerList{Servers: beServers[0:3]} + if err := roundrobin.CheckRoundRobinRPCs(ctx, testC, beServerAddrs[0:3]); err != nil { + t.Fatal(err) } - if seq := "22222"; !strings.Contains(result, strings.Repeat(seq, 100)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) +} + +// TestGRPCLB_BackendConnectionErrorPropagation tests the case where grpclb +// falls back to a backend which returns an error and the test verifies that the +// error is propagated to the RPC. +func (s) TestGRPCLB_BackendConnectionErrorPropagation(t *testing.T) { + r := manual.NewBuilderWithScheme("whatever") + + // Start up an LB which tells the client to fall back right away. 
+ tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 0, "", nil) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) } + defer cleanup() - tss.ls.sls <- &lbpb.ServerList{Servers: beServers[1:]} - result = "" - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) + // Start a standalone backend, to be used during fallback. The creds + // are intentionally misconfigured in order to simulate failure of a + // security handshake. + beLis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen %v", err) + } + defer beLis.Close() + standaloneBEs := startBackends(t, "arbitrary.invalid.name", true, beLis) + defer stopBackends(standaloneBEs) + + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer)) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + testC := testgrpc.NewTestServiceClient(cc) + + rs := resolver.State{ + Addresses: []resolver.Address{{Addr: beLis.Addr().String()}}, + ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig), + } + rs = grpclbstate.Set(rs, &grpclbstate.State{BalancerAddresses: []resolver.Address{{Addr: tss.lbAddr, ServerName: lbServerName}}}) + r.UpdateState(rs) + + // If https://github.com/grpc/grpc-go/blob/65cabd74d8e18d7347fecd414fa8d83a00035f5f/balancer/grpclb/grpclb_test.go#L103 + // changes, then expectedErrMsg may need to be updated. 
+ const expectedErrMsg = "received unexpected server name" + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + var wg sync.WaitGroup + wg.Add(1) + go func() { + tss.ls.fallbackNow() + wg.Done() + }() + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err == nil || !strings.Contains(err.Error(), expectedErrMsg) { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, rpc error containing substring: %q", testC, err, expectedErrMsg) + } + wg.Wait() +} + +func testGRPCLBEmptyServerList(t *testing.T, svcfg string) { + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) + } + defer cleanup() + + beServers := []*lbpb.Server{{ + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }} + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), + } + cc, err := grpc.DialContext(ctx, r.Scheme()+":///"+beServerName, dopts...) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + testC := testgrpc.NewTestServiceClient(cc) + + tss.ls.sls <- &lbpb.ServerList{Servers: beServers} + + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, + } + rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(svcfg)}, s) + r.UpdateState(rs) + t.Log("Perform an initial RPC and expect it to succeed...") + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("Initial _.EmptyCall(_, _) = _, %v, want _, ", err) + } + t.Log("Now send an empty server list. 
Wait until we see an RPC failure to make sure the client got it...") + tss.ls.sls <- &lbpb.ServerList{} + gotError := false + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { + gotError = true + break } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) } - if seq := "22222"; !strings.Contains(result, strings.Repeat(seq, 100)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + if !gotError { + t.Fatalf("Expected to eventually see an RPC fail after the grpclb sends an empty server list, but none did.") } + t.Log("Now send a non-empty server list. A wait-for-ready RPC should now succeed...") + tss.ls.sls <- &lbpb.ServerList{Servers: beServers} + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("Final _.EmptyCall(_, _) = _, %v, want _, ", err) + } +} + +func (s) TestGRPCLBEmptyServerListRoundRobin(t *testing.T) { + testGRPCLBEmptyServerList(t, `{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"round_robin":{}}]}}]}`) +} + +func (s) TestGRPCLBEmptyServerListPickFirst(t *testing.T) { + testGRPCLBEmptyServerList(t, `{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`) +} - // Switch sub policy to roundrobin. - grpclbServiceConfigEmpty := r.CC.ParseServiceConfig(`{}`) - if grpclbServiceConfigEmpty.Err != nil { - t.Fatalf("Error parsing config %q: %v", `{}`, grpclbServiceConfigEmpty.Err) +func (s) TestGRPCLBWithTargetNameFieldInConfig(t *testing.T) { + r := manual.NewBuilderWithScheme("whatever") + + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", nil) + if err != nil { + t.Fatalf("failed to create new load balancer: %v", err) } + defer cleanup() + sl := &lbpb.ServerList{ + Servers: []*lbpb.Server{ + { + IpAddress: tss.beIPs[0], + Port: int32(tss.bePorts[0]), + LoadBalanceToken: lbToken, + }, + }, + } + // Push the backend address to the remote balancer. 
+ tss.ls.sls <- sl - r.UpdateState(resolver.State{ - Addresses: []resolver.Address{{ + cc, err := grpc.Dial(r.Scheme()+":///"+beServerName, + grpc.WithResolvers(r), + grpc.WithTransportCredentials(&serverNameCheckCreds{}), + grpc.WithContextDialer(fakeNameDialer), + grpc.WithUserAgent(testUserAgent)) + if err != nil { + t.Fatalf("Failed to dial to the backend %v", err) + } + defer cc.Close() + testC := testgrpc.NewTestServiceClient(cc) + + // Push a resolver update with grpclb configuration which does not contain the + // target_name field. Our fake remote balancer is configured to always + // expect `beServerName` as the server name in the initial request. + rs := grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(grpclbConfig)}, + &grpclbstate.State{BalancerAddresses: []resolver.Address{{ Addr: tss.lbAddr, - Type: resolver.GRPCLB, ServerName: lbServerName, - }}, - ServiceConfig: grpclbServiceConfigEmpty, - }) + }}}) + r.UpdateState(rs) - result = "" - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - t.Fatalf("_.EmptyCall(_, _) = _, %v, want _, ", err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout when waiting for BalanceLoad RPC to be called on the remote balancer") + case <-tss.ls.balanceLoadCh: } - if seq := "121212"; !strings.Contains(result, strings.Repeat(seq, 100)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } - tss.ls.sls <- &lbpb.ServerList{Servers: beServers[0:3]} - result = "" - for i := 0; i < 1000; i++ { - if _, err := testC.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true), grpc.Peer(&p)); err != nil { - 
t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) - } - result += strconv.Itoa(portsToIndex[p.Addr.(*net.TCPAddr).Port]) + // When the value of target_field changes, grpclb will recreate the stream + // to the remote balancer. So, we need to update the fake remote balancer to + // expect a new server name in the initial request. + const newServerName = "new-server-name" + tss.ls.updateServerName(newServerName) + tss.ls.sls <- sl + + // Push the resolver update with target_field changed. + // Push a resolver update with grpclb configuration containing the + // target_name field. Our fake remote balancer has been updated above to expect the newServerName in the initial request. + lbCfg := fmt.Sprintf(`{"loadBalancingConfig": [{"grpclb": {"serviceName": "%s"}}]}`, newServerName) + s := &grpclbstate.State{ + BalancerAddresses: []resolver.Address{ + { + Addr: tss.lbAddr, + ServerName: lbServerName, + }, + }, } - if seq := "012012012"; !strings.Contains(result, strings.Repeat(seq, 2)) { - t.Errorf("got result sequence %q, want patten %q", result, seq) + rs = grpclbstate.Set(resolver.State{ServiceConfig: r.CC.ParseServiceConfig(lbCfg)}, s) + r.UpdateState(rs) + select { + case <-ctx.Done(): + t.Fatalf("timeout when waiting for BalanceLoad RPC to be called on the remote balancer") + case <-tss.ls.balanceLoadCh: + } + + if _, err := testC.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, ", testC, err) } } @@ -1255,7 +1399,7 @@ func checkStats(stats, expected *rpcStats) error { func runAndCheckStats(t *testing.T, drop bool, statsChan chan *lbpb.ClientStats, runRPCs func(*grpc.ClientConn), statsWant *rpcStats) error { r := manual.NewBuilderWithScheme("whatever") - tss, cleanup, err := newLoadBalancer(1, "", statsChan) + tss, cleanup, err := startBackendsAndRemoteLoadBalancer(t, 1, "", statsChan) if err != nil { t.Fatalf("failed to create new load balancer: %v", err) } @@ -1310,7 +1454,7 @@ const ( func (s) 
TestGRPCLBStatsUnarySuccess(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. @@ -1331,7 +1475,7 @@ func (s) TestGRPCLBStatsUnarySuccess(t *testing.T) { func (s) TestGRPCLBStatsUnaryDrop(t *testing.T) { if err := runAndCheckStats(t, true, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. @@ -1353,7 +1497,7 @@ func (s) TestGRPCLBStatsUnaryDrop(t *testing.T) { func (s) TestGRPCLBStatsUnaryFailedToSend(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. @@ -1375,7 +1519,7 @@ func (s) TestGRPCLBStatsUnaryFailedToSend(t *testing.T) { func (s) TestGRPCLBStatsStreamingSuccess(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. 
@@ -1410,7 +1554,7 @@ func (s) TestGRPCLBStatsStreamingSuccess(t *testing.T) { func (s) TestGRPCLBStatsStreamingDrop(t *testing.T) { if err := runAndCheckStats(t, true, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. @@ -1446,7 +1590,7 @@ func (s) TestGRPCLBStatsStreamingDrop(t *testing.T) { func (s) TestGRPCLBStatsStreamingFailedToSend(t *testing.T) { if err := runAndCheckStats(t, false, nil, func(cc *grpc.ClientConn) { - testC := testpb.NewTestServiceClient(cc) + testC := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultFallbackTimeout) defer cancel() // The first non-failfast RPC succeeds, all connections are up. diff --git a/balancer/grpclb/grpclb_test_util_test.go b/balancer/grpclb/grpclb_test_util_test.go deleted file mode 100644 index 5d3e6ba7fed9..000000000000 --- a/balancer/grpclb/grpclb_test_util_test.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpclb - -import ( - "net" - "sync" -) - -type tempError struct{} - -func (*tempError) Error() string { - return "grpclb test temporary error" -} -func (*tempError) Temporary() bool { - return true -} - -type restartableListener struct { - net.Listener - addr string - - mu sync.Mutex - closed bool - conns []net.Conn -} - -func newRestartableListener(l net.Listener) *restartableListener { - return &restartableListener{ - Listener: l, - addr: l.Addr().String(), - } -} - -func (l *restartableListener) Accept() (conn net.Conn, err error) { - conn, err = l.Listener.Accept() - if err == nil { - l.mu.Lock() - if l.closed { - conn.Close() - l.mu.Unlock() - return nil, &tempError{} - } - l.conns = append(l.conns, conn) - l.mu.Unlock() - } - return -} - -func (l *restartableListener) Close() error { - return l.Listener.Close() -} - -func (l *restartableListener) stopPreviousConns() { - l.mu.Lock() - l.closed = true - tmp := l.conns - l.conns = nil - l.mu.Unlock() - for _, conn := range tmp { - conn.Close() - } -} - -func (l *restartableListener) restart() { - l.mu.Lock() - l.closed = false - l.mu.Unlock() -} diff --git a/balancer/grpclb/state/state.go b/balancer/grpclb/state/state.go index a24264a34f5f..4ecfa1c21511 100644 --- a/balancer/grpclb/state/state.go +++ b/balancer/grpclb/state/state.go @@ -39,7 +39,7 @@ type State struct { // Set returns a copy of the provided state with attributes containing s. s's // data should not be mutated after calling Set. func Set(state resolver.State, s *State) resolver.State { - state.Attributes = state.Attributes.WithValues(key, s) + state.Attributes = state.Attributes.WithValue(key, s) return state } diff --git a/balancer/rls/balancer.go b/balancer/rls/balancer.go new file mode 100644 index 000000000000..076aae8c99f0 --- /dev/null +++ b/balancer/rls/balancer.go @@ -0,0 +1,658 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls implements the RLS LB policy. +package rls + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/buffer" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" +) + +const ( + // Name is the name of the RLS LB policy. + // + // It currently has an experimental suffix which would be removed once + // end-to-end testing of the policy is completed. + Name = internal.RLSLoadBalancingPolicyName + // Default frequency for data cache purging. + periodicCachePurgeFreq = time.Minute +) + +var ( + logger = grpclog.Component("rls") + errBalancerClosed = errors.New("rls LB policy is closed") + + // Below defined vars for overriding in unit tests. + + // Default exponential backoff strategy for data cache entries. + defaultBackoffStrategy = backoff.Strategy(backoff.DefaultExponential) + // Ticker used for periodic data cache purging. 
+ dataCachePurgeTicker = func() *time.Ticker { return time.NewTicker(periodicCachePurgeFreq) } + // We want every cache entry to live in the cache for at least this + // duration. If we encounter a cache entry whose minimum expiration time is + // in the future, we abort the LRU pass, which may temporarily leave the + // cache being too large. This is necessary to ensure that in cases where + // the cache is too small, when we receive an RLS Response, we keep the + // resulting cache entry around long enough for the pending incoming + // requests to be re-processed through the new Picker. If we didn't do this, + // then we'd risk throwing away each RLS response as we receive it, in which + // case we would fail to actually route any of our incoming requests. + minEvictDuration = 5 * time.Second + + // Following functions are no-ops in actual code, but can be overridden in + // tests to give tests visibility into exactly when certain events happen. + clientConnUpdateHook = func() {} + dataCachePurgeHook = func() {} + resetBackoffHook = func() {} +) + +func init() { + balancer.Register(&rlsBB{}) +} + +type rlsBB struct{} + +func (rlsBB) Name() string { + return Name +} + +func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + lb := &rlsBalancer{ + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + cc: cc, + bopts: opts, + purgeTicker: dataCachePurgeTicker(), + dataCachePurgeHook: dataCachePurgeHook, + lbCfg: &lbConfig{}, + pendingMap: make(map[cacheKey]*backoffState), + childPolicies: make(map[string]*childPolicyWrapper), + updateCh: buffer.NewUnbounded(), + } + lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb)) + lb.dataCache = newDataCache(maxCacheSize, lb.logger) + lb.bg = balancergroup.New(cc, opts, lb, lb.logger) + lb.bg.Start() + go lb.run() + return lb +} + +// rlsBalancer implements the RLS LB policy. 
+type rlsBalancer struct { + closed *grpcsync.Event // Fires when Close() is invoked. Guarded by stateMu. + done *grpcsync.Event // Fires when Close() is done. + cc balancer.ClientConn + bopts balancer.BuildOptions + purgeTicker *time.Ticker + dataCachePurgeHook func() + logger *internalgrpclog.PrefixLogger + + // If both cacheMu and stateMu need to be acquired, the former must be + // acquired first to prevent a deadlock. This order restriction is due to the + // fact that in places where we need to acquire both the locks, we always + // start off reading the cache. + + // cacheMu guards access to the data cache and pending requests map. We + // cannot use an RWMutex here since even an operation like + // dataCache.getEntry() modifies the underlying LRU, which is implemented as + // a doubly linked list. + cacheMu sync.Mutex + dataCache *dataCache // Cache of RLS data. + pendingMap map[cacheKey]*backoffState // Map of pending RLS requests. + + // stateMu guards access to all LB policy state. + stateMu sync.Mutex + lbCfg *lbConfig // Most recently received service config. + childPolicyBuilder balancer.Builder // Cached child policy builder. + resolverState resolver.State // Cached resolver state. + ctrlCh *controlChannel // Control channel to the RLS server. + bg *balancergroup.BalancerGroup + childPolicies map[string]*childPolicyWrapper + defaultPolicy *childPolicyWrapper + // A reference to the most recent picker sent to gRPC as part of a state + // update is cached in this field so that we can release the reference to the + // default child policy wrapper when a new picker is created. See + // sendNewPickerLocked() for details. + lastPicker *rlsPicker + // Set during UpdateClientConnState when pushing updates to child policies. + // Prevents state updates from child policies causing new pickers to be sent + // up the channel. Cleared after all child policies have processed the + // updates sent to them, after which a new picker is sent up the channel. 
+ inhibitPickerUpdates bool + + // Channel on which all updates are pushed. Processed in run(). + updateCh *buffer.Unbounded +} + +type resumePickerUpdates struct { + done chan struct{} +} + +// childPolicyIDAndState wraps a child policy id and its state update. +type childPolicyIDAndState struct { + id string + state balancer.State +} + +type controlChannelReady struct{} + +// run is a long-running goroutine which handles all the updates that the +// balancer wishes to handle. The appropriate updateHandler will push the update +// on to a channel that this goroutine will select on, thereby the handling of +// the update will happen asynchronously. +func (b *rlsBalancer) run() { + // We exit out of the for loop below only after `Close()` has been invoked. + // Firing the done event here will ensure that Close() returns only after + // all goroutines are done. + defer func() { b.done.Fire() }() + + // Wait for purgeDataCache() goroutine to exit before returning from here. + doneCh := make(chan struct{}) + defer func() { + <-doneCh + }() + go b.purgeDataCache(doneCh) + + for { + select { + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } + b.updateCh.Load() + switch update := u.(type) { + case childPolicyIDAndState: + b.handleChildPolicyStateUpdate(update.id, update.state) + case controlChannelReady: + b.logger.Infof("Resetting backoff state after control channel getting back to READY") + b.cacheMu.Lock() + updatePicker := b.dataCache.resetBackoffState(&backoffState{bs: defaultBackoffStrategy}) + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + resetBackoffHook() + case resumePickerUpdates: + b.stateMu.Lock() + b.logger.Infof("Resuming picker updates after config propagation to child policies") + b.inhibitPickerUpdates = false + b.sendNewPickerLocked() + close(update.done) + b.stateMu.Unlock() + default: + b.logger.Errorf("Unsupported update type %T", update) + } + case <-b.closed.Done(): + return + } + } +} + +// purgeDataCache is a 
long-running goroutine which periodically deletes expired +// entries. An expired entry is one for which both the expiryTime and +// backoffExpiryTime are in the past. +func (b *rlsBalancer) purgeDataCache(doneCh chan struct{}) { + defer close(doneCh) + + for { + select { + case <-b.closed.Done(): + return + case <-b.purgeTicker.C: + b.cacheMu.Lock() + updatePicker := b.dataCache.evictExpiredEntries() + b.cacheMu.Unlock() + if updatePicker { + b.sendNewPicker() + } + b.dataCachePurgeHook() + } + } +} + +func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + defer clientConnUpdateHook() + + b.stateMu.Lock() + if b.closed.HasFired() { + b.stateMu.Unlock() + b.logger.Warningf("Received service config after balancer close: %s", pretty.ToJSON(ccs.BalancerConfig)) + return errBalancerClosed + } + + newCfg := ccs.BalancerConfig.(*lbConfig) + if b.lbCfg.Equal(newCfg) { + b.stateMu.Unlock() + b.logger.Infof("New service config matches existing config") + return nil + } + + b.logger.Infof("Delaying picker updates until config is propagated to and processed by child policies") + b.inhibitPickerUpdates = true + + // When the RLS server name changes, the old control channel needs to be + // swapped out for a new one. All state associated with the throttling + // algorithm is stored on a per-control-channel basis; when we swap out + // channels, we also swap out the throttling state. + b.handleControlChannelUpdate(newCfg) + + // Any changes to child policy name or configuration needs to be handled by + // either creating new child policies or pushing updates to existing ones. + b.resolverState = ccs.ResolverState + b.handleChildPolicyConfigUpdate(newCfg, &ccs) + + // Resize the cache if the size in the config has changed. + resizeCache := newCfg.cacheSizeBytes != b.lbCfg.cacheSizeBytes + + // Update the copy of the config in the LB policy before releasing the lock. 
+ b.lbCfg = newCfg + + // Enqueue an event which will notify us when the above update has been + // propagated to all child policies, and the child policies have all + // processed their updates, and we have sent a picker update. + done := make(chan struct{}) + b.updateCh.Put(resumePickerUpdates{done: done}) + b.stateMu.Unlock() + <-done + + if resizeCache { + // If the new config changes reduces the size of the data cache, we + // might have to evict entries to get the cache size down to the newly + // specified size. + // + // And we cannot do this operation above (where we compute the + // `resizeCache` boolean) because `cacheMu` needs to be grabbed before + // `stateMu` if we are to hold both locks at the same time. + b.cacheMu.Lock() + b.dataCache.resize(newCfg.cacheSizeBytes) + b.cacheMu.Unlock() + } + return nil +} + +// handleControlChannelUpdate handles updates to service config fields which +// influence the control channel to the RLS server. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) handleControlChannelUpdate(newCfg *lbConfig) { + if newCfg.lookupService == b.lbCfg.lookupService && newCfg.lookupServiceTimeout == b.lbCfg.lookupServiceTimeout { + return + } + + // Create a new control channel and close the existing one. + b.logger.Infof("Creating control channel to RLS server at: %v", newCfg.lookupService) + backToReadyFn := func() { + b.updateCh.Put(controlChannelReady{}) + } + ctrlCh, err := newControlChannel(newCfg.lookupService, newCfg.controlChannelServiceConfig, newCfg.lookupServiceTimeout, b.bopts, backToReadyFn) + if err != nil { + // This is very uncommon and usually represents a non-transient error. + // There is not much we can do here other than wait for another update + // which might fix things. 
+ b.logger.Errorf("Failed to create control channel to %q: %v", newCfg.lookupService, err) + return + } + if b.ctrlCh != nil { + b.ctrlCh.close() + } + b.ctrlCh = ctrlCh +} + +// handleChildPolicyConfigUpdate handles updates to service config fields which +// influence child policy configuration. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) handleChildPolicyConfigUpdate(newCfg *lbConfig, ccs *balancer.ClientConnState) { + // Update child policy builder first since other steps are dependent on this. + if b.childPolicyBuilder == nil || b.childPolicyBuilder.Name() != newCfg.childPolicyName { + b.logger.Infof("Child policy changed to %q", newCfg.childPolicyName) + b.childPolicyBuilder = balancer.Get(newCfg.childPolicyName) + for _, cpw := range b.childPolicies { + // If the child policy has changed, we need to remove the old policy + // from the BalancerGroup and add a new one. The BalancerGroup takes + // care of closing the old one in this case. + b.bg.Remove(cpw.target) + b.bg.Add(cpw.target, b.childPolicyBuilder) + } + } + + configSentToDefault := false + if b.lbCfg.defaultTarget != newCfg.defaultTarget { + // If the default target has changed, create a new childPolicyWrapper for + // the new target if required. If a new wrapper is created, add it to the + // childPolicies map and the BalancerGroup. + b.logger.Infof("Default target in LB config changing from %q to %q", b.lbCfg.defaultTarget, newCfg.defaultTarget) + cpw := b.childPolicies[newCfg.defaultTarget] + if cpw == nil { + cpw = newChildPolicyWrapper(newCfg.defaultTarget) + b.childPolicies[newCfg.defaultTarget] = cpw + b.bg.Add(newCfg.defaultTarget, b.childPolicyBuilder) + b.logger.Infof("Child policy %q added to BalancerGroup", newCfg.defaultTarget) + } + if err := b.buildAndPushChildPolicyConfigs(newCfg.defaultTarget, newCfg, ccs); err != nil { + cpw.lamify(err) + } + + // If an old default exists, release its reference. 
If this was the last
+		// reference, remove the child policy from the BalancerGroup and remove the
+		// corresponding entry in the childPolicies map.
+		if b.defaultPolicy != nil {
+			if b.defaultPolicy.releaseRef() {
+				delete(b.childPolicies, b.lbCfg.defaultTarget)
+				b.bg.Remove(b.defaultPolicy.target)
+			}
+		}
+		b.defaultPolicy = cpw
+		configSentToDefault = true
+	}
+
+	// No change in configuration affecting child policies. Return early.
+	if b.lbCfg.childPolicyName == newCfg.childPolicyName && b.lbCfg.childPolicyTargetField == newCfg.childPolicyTargetField && childPolicyConfigEqual(b.lbCfg.childPolicyConfig, newCfg.childPolicyConfig) {
+		return
+	}
+
+	// If fields affecting child policy configuration have changed, the changes
+	// are pushed to the childPolicyWrapper which handles them appropriately.
+	for _, cpw := range b.childPolicies {
+		if configSentToDefault && cpw.target == newCfg.defaultTarget {
+			// Default target has already been taken care of.
+			continue
+		}
+		if err := b.buildAndPushChildPolicyConfigs(cpw.target, newCfg, ccs); err != nil {
+			cpw.lamify(err)
+		}
+	}
+}
+
+// buildAndPushChildPolicyConfigs builds the final child policy configuration by
+// adding the `targetField` to the base child policy configuration received in
+// RLS LB policy configuration. The `targetField` is set to target and
+// configuration is pushed to the child policy through the BalancerGroup.
+//
+// Caller must hold lb.stateMu. 
+func (b *rlsBalancer) buildAndPushChildPolicyConfigs(target string, newCfg *lbConfig, ccs *balancer.ClientConnState) error { + jsonTarget, err := json.Marshal(target) + if err != nil { + return fmt.Errorf("failed to marshal child policy target %q: %v", target, err) + } + + config := newCfg.childPolicyConfig + targetField := newCfg.childPolicyTargetField + config[targetField] = jsonTarget + jsonCfg, err := json.Marshal(config) + if err != nil { + return fmt.Errorf("failed to marshal child policy config %+v: %v", config, err) + } + + parser, _ := b.childPolicyBuilder.(balancer.ConfigParser) + parsedCfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("childPolicy config parsing failed: %v", err) + } + + state := balancer.ClientConnState{ResolverState: ccs.ResolverState, BalancerConfig: parsedCfg} + b.logger.Infof("Pushing new state to child policy %q: %+v", target, state) + if err := b.bg.UpdateClientConnState(target, state); err != nil { + b.logger.Warningf("UpdateClientConnState(%q, %+v) failed : %v", target, ccs, err) + } + return nil +} + +func (b *rlsBalancer) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *rlsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.bg.UpdateSubConnState(sc, state) +} + +func (b *rlsBalancer) Close() { + b.stateMu.Lock() + b.closed.Fire() + b.purgeTicker.Stop() + if b.ctrlCh != nil { + b.ctrlCh.close() + } + b.bg.Close() + b.stateMu.Unlock() + + b.cacheMu.Lock() + b.dataCache.stop() + b.cacheMu.Unlock() + + b.updateCh.Close() + + <-b.done.Done() +} + +func (b *rlsBalancer) ExitIdle() { + b.bg.ExitIdle() +} + +// sendNewPickerLocked pushes a new picker on to the channel. +// +// Note that regardless of what connectivity state is reported, the policy will +// return its own picker, and not a picker that unconditionally queues +// (typically used for IDLE or CONNECTING) or a picker that unconditionally +// fails (typically used for TRANSIENT_FAILURE). 
This is required because, +// irrespective of the connectivity state, we need to able to perform RLS +// lookups for incoming RPCs and affect the status of queued RPCs based on the +// receipt of RLS responses. +// +// Caller must hold lb.stateMu. +func (b *rlsBalancer) sendNewPickerLocked() { + aggregatedState := b.aggregatedConnectivityState() + + // Acquire a separate reference for the picker. This is required to ensure + // that the wrapper held by the old picker is not closed when the default + // target changes in the config, and a new wrapper is created for the new + // default target. See handleChildPolicyConfigUpdate() for how config changes + // affecting the default target are handled. + if b.defaultPolicy != nil { + b.defaultPolicy.acquireRef() + } + picker := &rlsPicker{ + kbm: b.lbCfg.kbMap, + origEndpoint: b.bopts.Target.Endpoint(), + lb: b, + defaultPolicy: b.defaultPolicy, + ctrlCh: b.ctrlCh, + maxAge: b.lbCfg.maxAge, + staleAge: b.lbCfg.staleAge, + bg: b.bg, + } + picker.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-picker %p] ", picker)) + state := balancer.State{ + ConnectivityState: aggregatedState, + Picker: picker, + } + + if !b.inhibitPickerUpdates { + b.logger.Infof("New balancer.State: %+v", state) + b.cc.UpdateState(state) + } else { + b.logger.Infof("Delaying picker update: %+v", state) + } + + if b.lastPicker != nil { + if b.defaultPolicy != nil { + b.defaultPolicy.releaseRef() + } + } + b.lastPicker = picker +} + +func (b *rlsBalancer) sendNewPicker() { + b.stateMu.Lock() + defer b.stateMu.Unlock() + if b.closed.HasFired() { + return + } + b.sendNewPickerLocked() +} + +// The aggregated connectivity state reported is determined as follows: +// - If there is at least one child policy in state READY, the connectivity +// state is READY. +// - Otherwise, if there is at least one child policy in state CONNECTING, the +// connectivity state is CONNECTING. 
+// - Otherwise, if there is at least one child policy in state IDLE, the
+// connectivity state is IDLE.
+// - Otherwise, all child policies are in TRANSIENT_FAILURE, and the
+// connectivity state is TRANSIENT_FAILURE.
+//
+// If the RLS policy has no child policies and no configured default target,
+// then we will report connectivity state IDLE.
+//
+// Caller must hold lb.stateMu.
+func (b *rlsBalancer) aggregatedConnectivityState() connectivity.State {
+	if len(b.childPolicies) == 0 && b.lbCfg.defaultTarget == "" {
+		return connectivity.Idle
+	}
+
+	var readyN, connectingN, idleN int
+	for _, cpw := range b.childPolicies {
+		state := (*balancer.State)(atomic.LoadPointer(&cpw.state))
+		switch state.ConnectivityState {
+		case connectivity.Ready:
+			readyN++
+		case connectivity.Connecting:
+			connectingN++
+		case connectivity.Idle:
+			idleN++
+		}
+	}
+
+	switch {
+	case readyN > 0:
+		return connectivity.Ready
+	case connectingN > 0:
+		return connectivity.Connecting
+	case idleN > 0:
+		return connectivity.Idle
+	default:
+		return connectivity.TransientFailure
+	}
+}
+
+// UpdateState is an implementation of the balancergroup.BalancerStateAggregator
+// interface. The actual state aggregation functionality is handled
+// asynchronously. This method only pushes the state update on to a channel that
+// is read and dispatched by the run() goroutine.
+func (b *rlsBalancer) UpdateState(id string, state balancer.State) {
+	b.updateCh.Put(childPolicyIDAndState{id: id, state: state})
+}
+
+// handleChildPolicyStateUpdate provides the state aggregator functionality for
+// the BalancerGroup.
+//
+// This method is invoked by the BalancerGroup whenever a child policy sends a
+// state update. 
We cache the child policy's connectivity state and picker for
+// two reasons:
+// - to suppress connectivity state transitions from TRANSIENT_FAILURE to states
+// other than READY
+// - to delegate picks to child policies
+func (b *rlsBalancer) handleChildPolicyStateUpdate(id string, newState balancer.State) {
+	b.stateMu.Lock()
+	defer b.stateMu.Unlock()
+
+	cpw := b.childPolicies[id]
+	if cpw == nil {
+		// All child policies start with an entry in the map. If ID is not in
+		// map, it's either been removed, or never existed.
+		b.logger.Warningf("Received state update %+v for missing child policy %q", newState, id)
+		return
+	}
+
+	oldState := (*balancer.State)(atomic.LoadPointer(&cpw.state))
+	if oldState.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting {
+		// Ignore state transitions from TRANSIENT_FAILURE to CONNECTING, and thus
+		// fail pending RPCs instead of queuing them indefinitely when all
+		// subChannels are failing, even if the subChannels are bouncing back and
+		// forth between CONNECTING and TRANSIENT_FAILURE.
+		return
+	}
+	atomic.StorePointer(&cpw.state, unsafe.Pointer(&newState))
+	b.logger.Infof("Child policy %q has new state %+v", id, newState)
+	b.sendNewPickerLocked()
+}
+
+// acquireChildPolicyReferences attempts to acquire references to
+// childPolicyWrappers corresponding to the passed in targets. If there is no
+// childPolicyWrapper corresponding to one of the targets, a new one is created
+// and added to the BalancerGroup.
+func (b *rlsBalancer) acquireChildPolicyReferences(targets []string) []*childPolicyWrapper {
+	b.stateMu.Lock()
+	var newChildPolicies []*childPolicyWrapper
+	for _, target := range targets {
+		// If the target exists in the LB policy's childPolicies map, a new
+		// reference is taken here and added to the new list. 
+ if cpw := b.childPolicies[target]; cpw != nil { + cpw.acquireRef() + newChildPolicies = append(newChildPolicies, cpw) + continue + } + + // If the target does not exist in the child policy map, then a new + // child policy wrapper is created and added to the new list. + cpw := newChildPolicyWrapper(target) + b.childPolicies[target] = cpw + b.bg.Add(target, b.childPolicyBuilder) + b.logger.Infof("Child policy %q added to BalancerGroup", target) + newChildPolicies = append(newChildPolicies, cpw) + if err := b.buildAndPushChildPolicyConfigs(target, b.lbCfg, &balancer.ClientConnState{ + ResolverState: b.resolverState, + }); err != nil { + cpw.lamify(err) + } + } + b.stateMu.Unlock() + return newChildPolicies +} + +// releaseChildPolicyReferences releases references to childPolicyWrappers +// corresponding to the passed in targets. If the release reference was the last +// one, the child policy is removed from the BalancerGroup. +func (b *rlsBalancer) releaseChildPolicyReferences(targets []string) { + b.stateMu.Lock() + for _, target := range targets { + if cpw := b.childPolicies[target]; cpw.releaseRef() { + delete(b.childPolicies, cpw.target) + b.bg.Remove(cpw.target) + } + } + b.stateMu.Unlock() +} diff --git a/balancer/rls/balancer_test.go b/balancer/rls/balancer_test.go new file mode 100644 index 000000000000..20da394ab2b2 --- /dev/null +++ b/balancer/rls/balancer_test.go @@ -0,0 +1,1134 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/test/e2e" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" + rlstest "google.golang.org/grpc/internal/testutils/rls" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/testdata" + "google.golang.org/protobuf/types/known/durationpb" +) + +// TestConfigUpdate_ControlChannel tests the scenario where a config update +// changes the RLS server name. Verifies that the new control channel is created +// and the old one is closed. +func (s) TestConfigUpdate_ControlChannel(t *testing.T) { + // Start two RLS servers. + lis1 := testutils.NewListenerWrapper(t, nil) + rlsServer1, rlsReqCh1 := rlstest.SetupFakeRLSServer(t, lis1) + lis2 := testutils.NewListenerWrapper(t, nil) + rlsServer2, rlsReqCh2 := rlstest.SetupFakeRLSServer(t, lis2) + + // Build RLS service config with the RLS server pointing to the first one. + // Set a very low value for maxAge to ensure that the entry expires soon. 
+ rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer1.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(defaultTestShortTimeout) + + // Start a couple of test backends, and set up the fake RLS servers to return + // these as a target in the RLS response. + backendCh1, backendAddress1 := startBackend(t) + rlsServer1.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress1}}} + }) + backendCh2, backendAddress2 := startBackend(t) + rlsServer2.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress2}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh1) + + // Ensure a connection is established to the first RLS server. + val, err := lis1.NewConnCh.Receive(ctx) + if err != nil { + t.Fatal("Timeout expired when waiting for LB policy to create control channel") + } + conn1 := val.(*testutils.ConnWrapper) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh1, true) + + // Change lookup_service field of the RLS config to point to the second one. + rlsConfig.RouteLookupConfig.LookupService = rlsServer2.Address + + // Push the config update through the manual resolver. 
+ scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + + // Ensure a connection is established to the second RLS server. + if _, err := lis2.NewConnCh.Receive(ctx); err != nil { + t.Fatal("Timeout expired when waiting for LB policy to create control channel") + } + + // Ensure the connection to the old one is closed. + if _, err := conn1.CloseCh.Receive(ctx); err != nil { + t.Fatal("Timeout expired when waiting for LB policy to close control channel") + } + + // Make an RPC and expect it to get routed to the second test backend through + // the second RLS server. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh2) + verifyRLSRequest(t, rlsReqCh2, true) +} + +// TestConfigUpdate_ControlChannelWithCreds tests the scenario where a config +// update specified an RLS server name, and the parent ClientConn specifies +// transport credentials. The RLS server and the test backend are configured to +// accept those transport credentials. This test verifies that the parent +// channel credentials are correctly propagated to the control channel. +func (s) TestConfigUpdate_ControlChannelWithCreds(t *testing.T) { + serverCreds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) + if err != nil { + t.Fatalf("credentials.NewServerTLSFromFile(server1.pem, server1.key) = %v", err) + } + clientCreds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "") + if err != nil { + t.Fatalf("credentials.NewClientTLSFromFile(ca.pem) = %v", err) + } + + // Start an RLS server with the wrapped listener and credentials. 
+ lis := testutils.NewListenerWrapper(t, nil) + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, lis, grpc.Creds(serverCreds)) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build RLS service config. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + + // Start a test backend which uses the same credentials as the RLS server, + // and set up the fake RLS server to return this as the target in the RLS + // response. + backendCh, backendAddress := startBackend(t, grpc.Creds(serverCreds)) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial with credentials and expect the RLS server to receive the same. The + // server certificate used for the RLS server and the backend specifies a + // DNS SAN of "*.test.example.com". Hence we use a dial target which is a + // subdomain of the same here. + cc, err := grpc.Dial(r.Scheme()+":///rls.test.example.com", grpc.WithResolvers(r), grpc.WithTransportCredentials(clientCreds)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Ensure a connection is established to the first RLS server. 
+ if _, err := lis.NewConnCh.Receive(ctx); err != nil { + t.Fatal("Timeout expired when waiting for LB policy to create control channel") + } +} + +// TestConfigUpdate_ControlChannelServiceConfig tests the scenario where RLS LB +// policy's configuration specifies the service config for the control channel +// via the `routeLookupChannelServiceConfig` field. This test verifies that the +// provided service config is applied for the control channel. +func (s) TestConfigUpdate_ControlChannelServiceConfig(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Register a balancer to be used for the control channel, and set up a + // callback to get notified when the balancer receives a clientConn updates. + ccUpdateCh := testutils.NewChannel() + bf := &e2e.BalancerFuncs{ + UpdateClientConnState: func(cfg *e2e.RLSChildPolicyConfig) error { + if cfg.Backend != rlsServer.Address { + return fmt.Errorf("control channel LB policy received config with backend %q, want %q", cfg.Backend, rlsServer.Address) + } + ccUpdateCh.Replace(nil) + return nil + }, + } + controlChannelPolicyName := "test-control-channel-" + t.Name() + e2e.RegisterRLSChildPolicy(controlChannelPolicyName, bf) + t.Logf("Registered child policy with name %q", controlChannelPolicyName) + + // Build RLS service config and set the `routeLookupChannelServiceConfig` + // field to a service config which uses the above balancer. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + rlsConfig.RouteLookupChannelServiceConfig = fmt.Sprintf(`{"loadBalancingConfig" : [{%q: {"backend": %q} }]}`, controlChannelPolicyName, rlsServer.Address) + + // Start a test backend, and set up the fake RLS server to return this as a + // target in the RLS response. 
+ backendCh, backendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///rls.test.example.com", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Verify that the control channel is using the LB policy we injected via the + // routeLookupChannelServiceConfig field. + if _, err := ccUpdateCh.Receive(ctx); err != nil { + t.Fatalf("timeout when waiting for control channel LB policy to receive a clientConn update") + } +} + +// TestConfigUpdate_DefaultTarget tests the scenario where a config update +// changes the default target. Verifies that RPCs get routed to the new default +// target after the config has been applied. +func (s) TestConfigUpdate_DefaultTarget(t *testing.T) { + // Start an RLS server and set the throttler to always throttle requests. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) + + // Build RLS service config with a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + backendCh1, backendAddress1 := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = backendAddress1 + + // Register a manual resolver and push the RLS service config through it. 
+ r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the default target. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh1) + + // Change default_target field of the RLS config. + backendCh2, backendAddress2 := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = backendAddress2 + + // Push the config update through the manual resolver. + scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh2) +} + +// TestConfigUpdate_ChildPolicyConfigs verifies that config changes which affect +// child policy configuration are propagated correctly. +func (s) TestConfigUpdate_ChildPolicyConfigs(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Start a default backend and a test backend. + _, defBackendAddress := startBackend(t) + testBackendCh, testBackendAddress := startBackend(t) + + // Set up the RLS server to respond with the test backend. + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Set up a test balancer callback to push configs received by child policies. 
+ defBackendConfigsCh := make(chan *e2e.RLSChildPolicyConfig, 1) + testBackendConfigsCh := make(chan *e2e.RLSChildPolicyConfig, 1) + bf := &e2e.BalancerFuncs{ + UpdateClientConnState: func(cfg *e2e.RLSChildPolicyConfig) error { + switch cfg.Backend { + case defBackendAddress: + defBackendConfigsCh <- cfg + case testBackendAddress: + testBackendConfigsCh <- cfg + default: + t.Errorf("Received child policy configs for unknown target %q", cfg.Backend) + } + return nil + }, + } + + // Register an LB policy to act as the child policy for RLS LB policy. + childPolicyName := "test-child-policy" + t.Name() + e2e.RegisterRLSChildPolicy(childPolicyName, bf) + t.Logf("Registered child policy with name %q", childPolicyName) + + // Build RLS service config with default target. + rlsConfig := buildBasicRLSConfig(childPolicyName, rlsServer.Address) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // At this point, the RLS LB policy should have received its config, and + // should have created a child policy for the default target. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantCfg := &e2e.RLSChildPolicyConfig{Backend: defBackendAddress} + select { + case <-ctx.Done(): + t.Fatal("Timed out when waiting for the default target child policy to receive its config") + case gotCfg := <-defBackendConfigsCh: + if !cmp.Equal(gotCfg, wantCfg) { + t.Fatalf("Default target child policy received config %+v, want %+v", gotCfg, wantCfg) + } + } + + // Make an RPC and ensure it gets routed to the test backend. 
+ makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // As part of handling the above RPC, the RLS LB policy should have created + // a child policy for the test target. + wantCfg = &e2e.RLSChildPolicyConfig{Backend: testBackendAddress} + select { + case <-ctx.Done(): + t.Fatal("Timed out when waiting for the test target child policy to receive its config") + case gotCfg := <-testBackendConfigsCh: + if !cmp.Equal(gotCfg, wantCfg) { + t.Fatalf("Test target child policy received config %+v, want %+v", gotCfg, wantCfg) + } + } + + // Push an RLS config update with a change in the child policy config. + childPolicyBuilder := balancer.Get(childPolicyName) + childPolicyParser := childPolicyBuilder.(balancer.ConfigParser) + lbCfg, err := childPolicyParser.ParseConfig([]byte(`{"Random": "random"}`)) + if err != nil { + t.Fatal(err) + } + rlsConfig.ChildPolicy.Config = lbCfg + scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + + // Expect the child policy for the test backend to receive the update. + wantCfg = &e2e.RLSChildPolicyConfig{ + Backend: testBackendAddress, + Random: "random", + } + select { + case <-ctx.Done(): + t.Fatal("Timed out when waiting for the test target child policy to receive its config") + case gotCfg := <-testBackendConfigsCh: + if !cmp.Equal(gotCfg, wantCfg) { + t.Fatalf("Test target child policy received config %+v, want %+v", gotCfg, wantCfg) + } + } + + // Expect the child policy for the default backend to receive the update. 
+ wantCfg = &e2e.RLSChildPolicyConfig{ + Backend: defBackendAddress, + Random: "random", + } + select { + case <-ctx.Done(): + t.Fatal("Timed out when waiting for the default target child policy to receive its config") + case gotCfg := <-defBackendConfigsCh: + if !cmp.Equal(gotCfg, wantCfg) { + t.Fatalf("Default target child policy received config %+v, want %+v", gotCfg, wantCfg) + } + } +} + +// TestConfigUpdate_ChildPolicyChange verifies that a child policy change is +// handled by closing the old balancer and creating a new one. +func (s) TestConfigUpdate_ChildPolicyChange(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Set up balancer callbacks. + configsCh1 := make(chan *e2e.RLSChildPolicyConfig, 1) + closeCh1 := make(chan struct{}, 1) + bf := &e2e.BalancerFuncs{ + UpdateClientConnState: func(cfg *e2e.RLSChildPolicyConfig) error { + configsCh1 <- cfg + return nil + }, + Close: func() { + closeCh1 <- struct{}{} + }, + } + + // Register an LB policy to act as the child policy for RLS LB policy. + childPolicyName1 := "test-child-policy-1" + t.Name() + e2e.RegisterRLSChildPolicy(childPolicyName1, bf) + t.Logf("Registered child policy with name %q", childPolicyName1) + + // Build RLS service config with a dummy default target. + const defaultBackend = "default-backend" + rlsConfig := buildBasicRLSConfig(childPolicyName1, rlsServer.Address) + rlsConfig.RouteLookupConfig.DefaultTarget = defaultBackend + + // Register a manual resolver and push the RLS service config through it. 
+ r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // At this point, the RLS LB policy should have received its config, and + // should have created a child policy for the default target. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantCfg := &e2e.RLSChildPolicyConfig{Backend: defaultBackend} + select { + case <-ctx.Done(): + t.Fatal("Timed out when waiting for the first child policy to receive its config") + case gotCfg := <-configsCh1: + if !cmp.Equal(gotCfg, wantCfg) { + t.Fatalf("First child policy received config %+v, want %+v", gotCfg, wantCfg) + } + } + + // Set up balancer callbacks for the second policy. + configsCh2 := make(chan *e2e.RLSChildPolicyConfig, 1) + bf = &e2e.BalancerFuncs{ + UpdateClientConnState: func(cfg *e2e.RLSChildPolicyConfig) error { + configsCh2 <- cfg + return nil + }, + } + + // Register a second LB policy to act as the child policy for RLS LB policy. + childPolicyName2 := "test-child-policy-2" + t.Name() + e2e.RegisterRLSChildPolicy(childPolicyName2, bf) + t.Logf("Registered child policy with name %q", childPolicyName2) + + // Push an RLS config update with a change in the child policy name. + rlsConfig.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: childPolicyName2} + scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + + // The above update should result in the first LB policy being shutdown and + // the second LB policy receiving a config update. 
+	select {
+	case <-ctx.Done():
+		t.Fatal("Timed out when waiting for the first child policy to be shutdown")
+	case <-closeCh1:
+	}
+
+	select {
+	case <-ctx.Done():
+		t.Fatal("Timed out when waiting for the second child policy to receive its config")
+	case gotCfg := <-configsCh2:
+		if !cmp.Equal(gotCfg, wantCfg) {
+			t.Fatalf("Second child policy received config %+v, want %+v", gotCfg, wantCfg)
+		}
+	}
+}
+
+// TestConfigUpdate_BadChildPolicyConfigs tests the scenario where a config
+// update is rejected by the child policy. Verifies that the child policy
+// wrapper goes "lame" and the error from the child policy is reported back to
+// the caller of the RPC.
+func (s) TestConfigUpdate_BadChildPolicyConfigs(t *testing.T) {
+	// Start an RLS server and set the throttler to never throttle requests.
+	rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil)
+	overrideAdaptiveThrottler(t, neverThrottlingThrottler())
+
+	// Set up the RLS server to respond with a bad target field which is expected
+	// to cause the child policy's ParseTarget to fail and should result in the LB
+	// policy creating a lame child policy wrapper.
+	rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse {
+		return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{e2e.RLSChildPolicyBadTarget}}}
+	})
+
+	// Build RLS service config with a default target. This default backend is
+	// expected to be healthy (even though we don't attempt to route RPCs to it)
+	// and ensures that the overall connectivity state of the RLS LB policy is not
+	// TRANSIENT_FAILURE. This is required to make sure that the pick for the bad
+	// child policy actually gets delegated to the child policy picker.
+	rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address)
+	_, addr := startBackend(t)
+	rlsConfig.RouteLookupConfig.DefaultTarget = addr
+
+	// Register a manual resolver and push the RLS service config through it.
+	r := startManualResolverWithConfig(t, rlsConfig)
+
+	cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		t.Fatalf("grpc.Dial() failed: %v", err)
+	}
+	defer cc.Close()
+
+	// Make an RPC and ensure that it fails with the expected error.
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	makeTestRPCAndVerifyError(ctx, t, cc, codes.Unavailable, e2e.ErrParseConfigBadTarget)
+
+	// Make sure an RLS request is sent out.
+	verifyRLSRequest(t, rlsReqCh, true)
+}
+
+// TestConfigUpdate_DataCacheSizeDecrease tests the scenario where a config
+// update decreases the data cache size. Verifies that entries are evicted from
+// the cache.
+func (s) TestConfigUpdate_DataCacheSizeDecrease(t *testing.T) {
+	// Override the clientConn update hook to get notified.
+	clientConnUpdateDone := make(chan struct{}, 1)
+	origClientConnUpdateHook := clientConnUpdateHook
+	clientConnUpdateHook = func() { clientConnUpdateDone <- struct{}{} }
+	defer func() { clientConnUpdateHook = origClientConnUpdateHook }()
+
+	// Override the cache entry size func, and always return 1.
+	origEntrySizeFunc := computeDataCacheEntrySize
+	computeDataCacheEntrySize = func(cacheKey, *cacheEntry) int64 { return 1 }
+	defer func() { computeDataCacheEntrySize = origEntrySizeFunc }()
+
+	// Override the minEvictionDuration to ensure that when the config update
+	// reduces the cache size, the resize operation is not stopped because
+	// we find an entry whose minExpiryDuration has not elapsed.
+ origMinEvictDuration := minEvictDuration + minEvictDuration = time.Duration(0) + defer func() { minEvictDuration = origMinEvictDuration }() + + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Register an LB policy to act as the child policy for RLS LB policy. + childPolicyName := "test-child-policy" + t.Name() + e2e.RegisterRLSChildPolicy(childPolicyName, nil) + t.Logf("Registered child policy with name %q", childPolicyName) + + // Build RLS service config with header matchers. + rlsConfig := buildBasicRLSConfig(childPolicyName, rlsServer.Address) + + // Start a couple of test backends, and set up the fake RLS server to return + // these as targets in the RLS response, based on request keys. + backendCh1, backendAddress1 := startBackend(t) + backendCh2, backendAddress2 := startBackend(t) + rlsServer.SetResponseCallback(func(ctx context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + if req.KeyMap["k1"] == "v1" { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress1}}} + } + if req.KeyMap["k2"] == "v2" { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress2}}} + } + return &rlstest.RouteLookupResponse{Err: errors.New("no keys in request metadata")} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + <-clientConnUpdateDone + + // Make an RPC and ensure it gets routed to the first backend. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + ctxOutgoing := metadata.AppendToOutgoingContext(ctx, "n1", "v1") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh1) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Make another RPC with a different set of headers. This will force the LB + // policy to send out a new RLS request, resulting in a new data cache + // entry. + ctxOutgoing = metadata.AppendToOutgoingContext(ctx, "n2", "v2") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh2) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // We currently have two cache entries. Setting the size to 1, will cause + // the entry corresponding to backend1 to be evicted. + rlsConfig.RouteLookupConfig.CacheSizeBytes = 1 + + // Push the config update through the manual resolver. + scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + + <-clientConnUpdateDone + + // Make an RPC to match the cache entry which got evicted above, and expect + // an RLS request to be made to fetch the targets. + ctxOutgoing = metadata.AppendToOutgoingContext(ctx, "n1", "v1") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh1) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) +} + +// TestDataCachePurging verifies that the LB policy periodically evicts expired +// entries from the data cache. +func (s) TestDataCachePurging(t *testing.T) { + // Override the frequency of the data cache purger to a small one. 
+ origDataCachePurgeTicker := dataCachePurgeTicker + ticker := time.NewTicker(defaultTestShortTimeout) + defer ticker.Stop() + dataCachePurgeTicker = func() *time.Ticker { return ticker } + defer func() { dataCachePurgeTicker = origDataCachePurgeTicker }() + + // Override the data cache purge hook to get notified. + dataCachePurgeDone := make(chan struct{}, 1) + origDataCachePurgeHook := dataCachePurgeHook + dataCachePurgeHook = func() { dataCachePurgeDone <- struct{}{} } + defer func() { dataCachePurgeHook = origDataCachePurgeHook }() + + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Register an LB policy to act as the child policy for RLS LB policy. + childPolicyName := "test-child-policy" + t.Name() + e2e.RegisterRLSChildPolicy(childPolicyName, nil) + t.Logf("Registered child policy with name %q", childPolicyName) + + // Build RLS service config with header matchers and lookupService pointing to + // the fake RLS server created above. Set a very low value for maxAge to + // ensure that the entry expires soon. + rlsConfig := buildBasicRLSConfig(childPolicyName, rlsServer.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(time.Millisecond) + + // Start a test backend, and set up the fake RLS server to return this as a + // target in the RLS response. + backendCh, backendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. 
+ r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + ctxOutgoing := metadata.AppendToOutgoingContext(ctx, "n1", "v1") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Make another RPC with different headers. This will force the LB policy to + // send out a new RLS request, resulting in a new data cache entry. + ctxOutgoing = metadata.AppendToOutgoingContext(ctx, "n2", "v2") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Wait for the data cache purging to happen before proceeding. + <-dataCachePurgeDone + + // Perform the same RPCs again and verify that they result in RLS requests. + ctxOutgoing = metadata.AppendToOutgoingContext(ctx, "n1", "v1") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Make another RPC with different headers. This will force the LB policy to + // send out a new RLS request, resulting in a new data cache entry. + ctxOutgoing = metadata.AppendToOutgoingContext(ctx, "n2", "v2") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) +} + +// TestControlChannelConnectivityStateMonitoring tests the scenario where the +// control channel goes down and comes back up again and verifies that backoff +// state is reset for cache entries in this scenario. 
+func (s) TestControlChannelConnectivityStateMonitoring(t *testing.T) {
+	// Create a restartable listener which can close existing connections.
+	l, err := testutils.LocalTCPListener()
+	if err != nil {
+		t.Fatalf("net.Listen() failed: %v", err)
+	}
+	lis := testutils.NewRestartableListener(l)
+
+	// Start an RLS server with the restartable listener and set the throttler to
+	// never throttle requests.
+	rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, lis)
+	overrideAdaptiveThrottler(t, neverThrottlingThrottler())
+
+	// Override the reset backoff hook to get notified.
+	resetBackoffDone := make(chan struct{}, 1)
+	origResetBackoffHook := resetBackoffHook
+	resetBackoffHook = func() { resetBackoffDone <- struct{}{} }
+	defer func() { resetBackoffHook = origResetBackoffHook }()
+
+	// Override the backoff strategy to return a large backoff which
+	// will make sure the data cache entry remains in backoff for the
+	// duration of the test.
+	origBackoffStrategy := defaultBackoffStrategy
+	defaultBackoffStrategy = &fakeBackoffStrategy{backoff: defaultTestTimeout}
+	defer func() { defaultBackoffStrategy = origBackoffStrategy }()
+
+	// Register an LB policy to act as the child policy for RLS LB policy.
+	childPolicyName := "test-child-policy" + t.Name()
+	e2e.RegisterRLSChildPolicy(childPolicyName, nil)
+	t.Logf("Registered child policy with name %q", childPolicyName)
+
+	// Build RLS service config with header matchers, and a very low value for
+	// maxAge to ensure that cache entries become invalid very soon.
+	rlsConfig := buildBasicRLSConfig(childPolicyName, rlsServer.Address)
+	rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(defaultTestShortTimeout)
+
+	// Start a test backend, and set up the fake RLS server to return this as a
+	// target in the RLS response.
+ backendCh, backendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{backendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Stop the RLS server. + lis.Stop() + + // Make another RPC similar to the first one. Since the above cache entry + // would have expired by now, this should trigger another RLS request. And + // since the RLS server is down, RLS request will fail and the cache entry + // will enter backoff, and we have overridden the default backoff strategy to + // return a value which will keep this entry in backoff for the whole duration + // of the test. + makeTestRPCAndVerifyError(ctx, t, cc, codes.Unavailable, nil) + + // Restart the RLS server. + lis.Restart() + + // When we closed the RLS server earlier, the existing transport to the RLS + // server would have closed, and the RLS control channel would have moved to + // TRANSIENT_FAILURE with a subConn backoff before moving to IDLE. This + // backoff will last for about a second. We need to keep retrying RPCs for the + // subConn to eventually come out of backoff and attempt to reconnect. + // + // Make this RPC with a different set of headers leading to the creation of + // a new cache entry and a new RLS request. 
This RLS request will also fail + // till the control channel comes moves back to READY. So, override the + // backoff strategy to perform a small backoff on this entry. + defaultBackoffStrategy = &fakeBackoffStrategy{backoff: defaultTestShortTimeout} + ctxOutgoing := metadata.AppendToOutgoingContext(ctx, "n1", "v1") + makeTestRPCAndExpectItToReachBackend(ctxOutgoing, t, cc, backendCh) + + select { + case <-ctx.Done(): + t.Fatalf("Timed out waiting for resetBackoffDone") + case <-resetBackoffDone: + } + + // The fact that the above RPC succeeded indicates that the control channel + // has moved back to READY. The connectivity state monitoring code should have + // realized this and should have reset all backoff timers (which in this case + // is the cache entry corresponding to the first RPC). Retrying that RPC now + // should succeed with an RLS request being sent out. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, backendCh) + verifyRLSRequest(t, rlsReqCh, true) +} + +const wrappingTopLevelBalancerName = "wrapping-top-level-balancer" +const multipleUpdateStateChildBalancerName = "multiple-update-state-child-balancer" + +type wrappingTopLevelBalancerBuilder struct { + balCh chan balancer.Balancer +} + +func (w *wrappingTopLevelBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + tlb := &wrappingTopLevelBalancer{ClientConn: cc} + tlb.Balancer = balancer.Get(Name).Build(tlb, balancer.BuildOptions{}) + w.balCh <- tlb + return tlb +} + +func (w *wrappingTopLevelBalancerBuilder) Name() string { + return wrappingTopLevelBalancerName +} + +func (w *wrappingTopLevelBalancerBuilder) ParseConfig(sc json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + parser := balancer.Get(Name).(balancer.ConfigParser) + return parser.ParseConfig(sc) +} + +// wrappingTopLevelBalancer acts as the top-level LB policy on the channel and +// wraps an RLS LB policy. 
It forwards all balancer API calls unmodified to the +// underlying RLS LB policy. It overrides the UpdateState method on the +// balancer.ClientConn passed to the RLS LB policy and stores all state updates +// pushed by the latter. +type wrappingTopLevelBalancer struct { + balancer.ClientConn + balancer.Balancer + + mu sync.Mutex + states []balancer.State +} + +func (w *wrappingTopLevelBalancer) UpdateState(bs balancer.State) { + w.mu.Lock() + w.states = append(w.states, bs) + w.mu.Unlock() + w.ClientConn.UpdateState(bs) +} + +func (w *wrappingTopLevelBalancer) getStates() []balancer.State { + w.mu.Lock() + defer w.mu.Unlock() + + states := make([]balancer.State, len(w.states)) + copy(states, w.states) + return states +} + +// wrappedPickFirstBalancerBuilder builds a balancer which wraps a pickfirst +// balancer. The wrapping balancing receives addresses to be passed to the +// underlying pickfirst balancer as part of its configuration. +type wrappedPickFirstBalancerBuilder struct{} + +func (wrappedPickFirstBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(grpc.PickFirstBalancerName) + wpfb := &wrappedPickFirstBalancer{ + ClientConn: cc, + } + pf := builder.Build(wpfb, opts) + wpfb.Balancer = pf + return wpfb +} + +func (wrappedPickFirstBalancerBuilder) Name() string { + return multipleUpdateStateChildBalancerName +} + +type WrappedPickFirstBalancerConfig struct { + serviceconfig.LoadBalancingConfig + Backend string // The target for which this child policy was created. +} + +func (wbb *wrappedPickFirstBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &WrappedPickFirstBalancerConfig{} + if err := json.Unmarshal(c, cfg); err != nil { + return nil, err + } + return cfg, nil +} + +// wrappedPickFirstBalancer wraps a pickfirst balancer and makes multiple calls +// to UpdateState when handling a config update in UpdateClientConnState. 
When +// this policy is used as a child policy of the RLS LB policy, it is expected +// that the latter suppress these updates and push a single picker update on the +// channel (after the config has been processed by all child policies). +type wrappedPickFirstBalancer struct { + balancer.Balancer + balancer.ClientConn +} + +func (wb *wrappedPickFirstBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + wb.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &testutils.TestConstPicker{Err: balancer.ErrNoSubConnAvailable}}) + wb.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: &testutils.TestConstPicker{Err: balancer.ErrNoSubConnAvailable}}) + + cfg := ccs.BalancerConfig.(*WrappedPickFirstBalancerConfig) + return wb.Balancer.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{{Addr: cfg.Backend}}}, + }) +} + +func (wb *wrappedPickFirstBalancer) UpdateState(state balancer.State) { + // Eat it if IDLE - allows it to switch over only on a READY SubConn. + if state.ConnectivityState == connectivity.Idle { + return + } + wb.ClientConn.UpdateState(state) +} + +// TestUpdateStatePauses tests the scenario where a config update received by +// the RLS LB policy results in multiple UpdateState calls from the child +// policies. This test verifies that picker updates are paused when the config +// update is being processed by RLS LB policy and its child policies. +// +// The test uses a wrapping balancer as the top-level LB policy on the channel. +// The wrapping balancer wraps an RLS LB policy as a child policy and forwards +// all calls to it. It also records the UpdateState() calls from the RLS LB +// policy and makes it available for inspection by the test. +// +// The test uses another wrapped balancer (which wraps a pickfirst balancer) as +// the child policy of the RLS LB policy. 
This balancer makes multiple +// UpdateState calls when handling an update from its parent in +// UpdateClientConnState. +func (s) TestUpdateStatePauses(t *testing.T) { + // Override the hook to get notified when UpdateClientConnState is done. + clientConnUpdateDone := make(chan struct{}, 1) + origClientConnUpdateHook := clientConnUpdateHook + clientConnUpdateHook = func() { clientConnUpdateDone <- struct{}{} } + defer func() { clientConnUpdateHook = origClientConnUpdateHook }() + + // Register the top-level wrapping balancer which forwards calls to RLS. + bb := &wrappingTopLevelBalancerBuilder{balCh: make(chan balancer.Balancer, 1)} + balancer.Register(bb) + + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Start a test backend and set the RLS server to respond with it. + testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a child policy which wraps a pickfirst balancer and receives the + // backend address as part of its configuration. + balancer.Register(&wrappedPickFirstBalancerBuilder{}) + + // Register a manual resolver and push the RLS service config through it. 
+ r := manual.NewBuilderWithScheme("rls-e2e") + scJSON := fmt.Sprintf(` +{ + "loadBalancingConfig": [ + { + "%s": { + "routeLookupConfig": { + "grpcKeybuilders": [{ + "names": [{"service": "grpc.testing.TestService"}] + }], + "lookupService": "%s", + "cacheSizeBytes": 1000 + }, + "childPolicy": [{"%s": {}}], + "childPolicyConfigTargetFieldName": "Backend" + } + } + ] +}`, wrappingTopLevelBalancerName, rlsServer.Address, multipleUpdateStateChildBalancerName) + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) + r.InitialState(resolver.State{ServiceConfig: sc}) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Wait for the clientconn update to be processed by the RLS LB policy. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + case <-clientConnUpdateDone: + } + + // Get the top-level LB policy configured on the channel, to be able to read + // the state updates pushed by its child (the RLS LB policy.) + var wb *wrappingTopLevelBalancer + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for state update on the top-level LB policy") + case b := <-bb.balCh: + wb = b.(*wrappingTopLevelBalancer) + } + + // It is important to note that at this point no child policies have been + // created because we have not attempted any RPC so far. When we attempt an + // RPC (below), child policies will be created and their configs will be + // pushed to them. But this config update will not happen in the context of + // a config update on the parent. + + // Make an RPC and ensure it gets routed to the test backend. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. 
+ verifyRLSRequest(t, rlsReqCh, true) + + // Wait for the control channel to become READY, before reading the states + // out of the wrapping top-level balancer. + // + // makeTestRPCAndExpectItToReachBackend repeatedly sends RPCs with short + // deadlines until one succeeds. See its docstring for details. + // + // The following sequence of events is possible: + // 1. When the first RPC is attempted above, a pending cache entry is + // created, an RLS request is sent out, and the pick is queued. The + // channel is in CONNECTING state. + // 2. When the RLS response arrives, the pending cache entry is moved to the + // data cache, a child policy is created for the target specified in the + // response and a new picker is returned. The channel is still in + // CONNECTING, and retried pick is again queued. + // 3. The child policy moves through the standard set of states, IDLE --> + // CONNECTING --> READY. And for each of these state changes, a new + // picker is sent on the channel. But the overall connectivity state of + // the channel is still CONNECTING. + // 4. Right around the time when the child policy becomes READY, the + // deadline associated with the first RPC made by + // makeTestRPCAndExpectItToReachBackend() could expire, and it could send + // a new one. And because the internal state of the LB policy now + // contains a child policy which is READY, this RPC will succeed. But the + // RLS LB policy has yet to push a new picker on the channel. + // 5. If we read the states seen by the top-level wrapping LB policy without + // waiting for the channel to become READY, there is a possibility that we + // might not see the READY state in there. And if that happens, we will + // see two extra states in the last check made in the test, and thereby + // the test would fail. Waiting for the channel to become READY here + // ensures that the test does not flake because of this rare sequence of + // events. 
+ for s := cc.GetState(); s != connectivity.Ready; s = cc.GetState() { + if !cc.WaitForStateChange(ctx, s) { + t.Fatal("Timeout when waiting for connectivity state to reach READY") + } + } + + // Cache the state changes seen up to this point. + states0 := wb.getStates() + + // Push an updated service config. As mentioned earlier, the previous config + // updates on the child policies did not happen in the context of a config + // update on the parent. Hence, this update is required to force the + // scenario which we are interesting in testing here, i.e child policies get + // config updates as part of the parent policy getting its config update. + scJSON = fmt.Sprintf(` +{ + "loadBalancingConfig": [ + { + "%s": { + "routeLookupConfig": { + "grpcKeybuilders": [{ + "names": [ + {"service": "grpc.testing.TestService"}, + {"service": "grpc.health.v1.Health"} + ] + }], + "lookupService": "%s", + "cacheSizeBytes": 1000 + }, + "childPolicy": [{"%s": {}}], + "childPolicyConfigTargetFieldName": "Backend" + } + } + ] +}`, wrappingTopLevelBalancerName, rlsServer.Address, multipleUpdateStateChildBalancerName) + sc = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) + r.UpdateState(resolver.State{ServiceConfig: sc}) + + // Wait for the clientconn update to be processed by the RLS LB policy. + select { + case <-ctx.Done(): + case <-clientConnUpdateDone: + } + + // Even though the child policies used in this test make multiple calls to + // UpdateState as part of handling their configs, we expect the RLS policy + // to inhibit picker updates during this time frame, and send a single + // picker once the config update is completely handled. + states1 := wb.getStates() + if len(states1) != len(states0)+1 { + t.Fatalf("more than one state update seen. 
before %v, after %v", states0, states1) + } +} diff --git a/balancer/rls/cache.go b/balancer/rls/cache.go new file mode 100644 index 000000000000..d7a6a1a436c6 --- /dev/null +++ b/balancer/rls/cache.go @@ -0,0 +1,361 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "container/list" + "time" + + "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" +) + +// cacheKey represents the key used to uniquely identify an entry in the data +// cache and in the pending requests map. +type cacheKey struct { + // path is the full path of the incoming RPC request. + path string + // keys is a stringified version of the RLS request key map built using the + // RLS keyBuilder. Since maps are not a type which is comparable in Go, it + // cannot be part of the key for another map (entries in the data cache and + // pending requests map are stored in maps). + keys string +} + +// cacheEntry wraps all the data to be stored in a data cache entry. +type cacheEntry struct { + // childPolicyWrappers contains the list of child policy wrappers + // corresponding to the targets returned by the RLS server for this entry. + childPolicyWrappers []*childPolicyWrapper + // headerData is received in the RLS response and is to be sent in the + // X-Google-RLS-Data header for matching RPCs. 
+ headerData string + // expiryTime is the absolute time at which this cache entry entry stops + // being valid. When an RLS request succeeds, this is set to the current + // time plus the max_age field from the LB policy config. + expiryTime time.Time + // staleTime is the absolute time after which this cache entry will be + // proactively refreshed if an incoming RPC matches this entry. When an RLS + // request succeeds, this is set to the current time plus the stale_age from + // the LB policy config. + staleTime time.Time + // earliestEvictTime is the absolute time before which this entry should not + // be evicted from the cache. When a cache entry is created, this is set to + // the current time plus a default value of 5 seconds. This is required to + // make sure that a new entry added to the cache is not evicted before the + // RLS response arrives (usually when the cache is too small). + earliestEvictTime time.Time + + // status stores the RPC status of the previous RLS request for this + // entry. Picks for entries with a non-nil value for this field are failed + // with the error stored here. + status error + // backoffState contains all backoff related state. When an RLS request + // succeeds, backoffState is reset. This state moves between the data cache + // and the pending requests map. + backoffState *backoffState + // backoffTime is the absolute time at which the backoff period for this + // entry ends. When an RLS request fails, this is set to the current time + // plus the backoff value returned by the backoffState. The backoff timer is + // also setup with this value. No new RLS requests are sent out for this + // entry until the backoff period ends. + // + // Set to zero time instant upon a successful RLS response. + backoffTime time.Time + // backoffExpiryTime is the absolute time at which an entry which has gone + // through backoff stops being valid. When an RLS request fails, this is + // set to the current time plus twice the backoff time. 
The cache expiry + // timer will only delete entries for which both expiryTime and + // backoffExpiryTime are in the past. + // + // Set to zero time instant upon a successful RLS response. + backoffExpiryTime time.Time + + // size stores the size of this cache entry. Used to enforce the cache size + // specified in the LB policy configuration. + size int64 +} + +// backoffState wraps all backoff related state associated with a cache entry. +type backoffState struct { + // retries keeps track of the number of RLS failures, to be able to + // determine the amount of time to backoff before the next attempt. + retries int + // bs is the exponential backoff implementation which returns the amount of + // time to backoff, given the number of retries. + bs backoff.Strategy + // timer fires when the backoff period ends and incoming requests after this + // will trigger a new RLS request. + timer *time.Timer +} + +// lru is a cache implementation with a least recently used eviction policy. +// Internally it uses a doubly linked list, with the least recently used element +// at the front of the list and the most recently used element at the back of +// the list. The value stored in this cache will be of type `cacheKey`. +// +// It is not safe for concurrent access. +type lru struct { + ll *list.List + + // A map from the value stored in the lru to its underlying list element is + // maintained to have a clean API. Without this, a subset of the lru's API + // would accept/return cacheKey while another subset would accept/return + // list elements. + m map[cacheKey]*list.Element +} + +// newLRU creates a new cache with a least recently used eviction policy. 
+func newLRU() *lru { + return &lru{ + ll: list.New(), + m: make(map[cacheKey]*list.Element), + } +} + +func (l *lru) addEntry(key cacheKey) { + e := l.ll.PushBack(key) + l.m[key] = e +} + +func (l *lru) makeRecent(key cacheKey) { + e := l.m[key] + l.ll.MoveToBack(e) +} + +func (l *lru) removeEntry(key cacheKey) { + e := l.m[key] + l.ll.Remove(e) + delete(l.m, key) +} + +func (l *lru) getLeastRecentlyUsed() cacheKey { + e := l.ll.Front() + if e == nil { + return cacheKey{} + } + return e.Value.(cacheKey) +} + +// dataCache contains a cache of RLS data used by the LB policy to make routing +// decisions. +// +// The dataCache will be keyed by the request's path and keys, represented by +// the `cacheKey` type. It will maintain the cache keys in an `lru` and the +// cache data, represented by the `cacheEntry` type, in a native map. +// +// It is not safe for concurrent access. +type dataCache struct { + maxSize int64 // Maximum allowed size. + currentSize int64 // Current size. + keys *lru // Cache keys maintained in lru order. + entries map[cacheKey]*cacheEntry + logger *internalgrpclog.PrefixLogger + shutdown *grpcsync.Event +} + +func newDataCache(size int64, logger *internalgrpclog.PrefixLogger) *dataCache { + return &dataCache{ + maxSize: size, + keys: newLRU(), + entries: make(map[cacheKey]*cacheEntry), + logger: logger, + shutdown: grpcsync.NewEvent(), + } +} + +// resize changes the maximum allowed size of the data cache. +// +// The return value indicates if an entry with a valid backoff timer was +// evicted. This is important to the RLS LB policy which would send a new picker +// on the channel to re-process any RPCs queued as a result of this backoff +// timer. +func (dc *dataCache) resize(size int64) (backoffCancelled bool) { + if dc.shutdown.HasFired() { + return false + } + + backoffCancelled = false + for dc.currentSize > size { + key := dc.keys.getLeastRecentlyUsed() + entry, ok := dc.entries[key] + if !ok { + // This should never happen. 
+ dc.logger.Errorf("cacheKey %+v not found in the cache while attempting to resize it", key) + break + } + + // When we encounter a cache entry whose minimum expiration time is in + // the future, we abort the LRU pass, which may temporarily leave the + // cache being too large. This is necessary to ensure that in cases + // where the cache is too small, when we receive an RLS Response, we + // keep the resulting cache entry around long enough for the pending + // incoming requests to be re-processed through the new Picker. If we + // didn't do this, then we'd risk throwing away each RLS response as we + // receive it, in which case we would fail to actually route any of our + // incoming requests. + if entry.earliestEvictTime.After(time.Now()) { + dc.logger.Warningf("cachekey %+v is too recent to be evicted. Stopping cache resizing for now", key) + break + } + + // Stop the backoff timer before evicting the entry. + if entry.backoffState != nil && entry.backoffState.timer != nil { + if entry.backoffState.timer.Stop() { + entry.backoffState.timer = nil + backoffCancelled = true + } + } + dc.deleteAndcleanup(key, entry) + } + dc.maxSize = size + return backoffCancelled +} + +// evictExpiredEntries sweeps through the cache and deletes expired entries. An +// expired entry is one for which both the `expiryTime` and `backoffExpiryTime` +// fields are in the past. +// +// The return value indicates if any expired entries were evicted. +// +// The LB policy invokes this method periodically to purge expired entries. +func (dc *dataCache) evictExpiredEntries() bool { + if dc.shutdown.HasFired() { + return false + } + + evicted := false + for key, entry := range dc.entries { + // Only evict entries for which both the data expiration time and + // backoff expiration time fields are in the past. 
+ now := time.Now() + if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) { + continue + } + dc.deleteAndcleanup(key, entry) + evicted = true + } + return evicted +} + +// resetBackoffState sweeps through the cache and for entries with a backoff +// state, the backoff timer is cancelled and the backoff state is reset. The +// return value indicates if any entries were mutated in this fashion. +// +// The LB policy invokes this method when the control channel moves from READY +// to TRANSIENT_FAILURE back to READY. See `monitorConnectivityState` method on +// the `controlChannel` type for more details. +func (dc *dataCache) resetBackoffState(newBackoffState *backoffState) bool { + if dc.shutdown.HasFired() { + return false + } + + backoffReset := false + for _, entry := range dc.entries { + if entry.backoffState == nil { + continue + } + if entry.backoffState.timer != nil { + entry.backoffState.timer.Stop() + entry.backoffState.timer = nil + } + entry.backoffState = &backoffState{bs: newBackoffState.bs} + entry.backoffTime = time.Time{} + entry.backoffExpiryTime = time.Time{} + backoffReset = true + } + return backoffReset +} + +// addEntry adds a cache entry for the given key. +// +// Return value backoffCancelled indicates if a cache entry with a valid backoff +// timer was evicted to make space for the current entry. This is important to +// the RLS LB policy which would send a new picker on the channel to re-process +// any RPCs queued as a result of this backoff timer. +// +// Return value ok indicates if entry was successfully added to the cache. +func (dc *dataCache) addEntry(key cacheKey, entry *cacheEntry) (backoffCancelled bool, ok bool) { + if dc.shutdown.HasFired() { + return false, false + } + + // Handle the extremely unlikely case that a single entry is bigger than the + // size of the cache. 
+ if entry.size > dc.maxSize { + return false, false + } + dc.entries[key] = entry + dc.currentSize += entry.size + dc.keys.addEntry(key) + // If the new entry makes the cache go over its configured size, remove some + // old entries. + if dc.currentSize > dc.maxSize { + backoffCancelled = dc.resize(dc.maxSize) + } + return backoffCancelled, true +} + +// updateEntrySize updates the size of a cache entry and the current size of the +// data cache. An entry's size can change upon receipt of an RLS response. +func (dc *dataCache) updateEntrySize(entry *cacheEntry, newSize int64) { + dc.currentSize -= entry.size + entry.size = newSize + dc.currentSize += entry.size +} + +func (dc *dataCache) getEntry(key cacheKey) *cacheEntry { + if dc.shutdown.HasFired() { + return nil + } + + entry, ok := dc.entries[key] + if !ok { + return nil + } + dc.keys.makeRecent(key) + return entry +} + +func (dc *dataCache) removeEntryForTesting(key cacheKey) { + entry, ok := dc.entries[key] + if !ok { + return + } + dc.deleteAndcleanup(key, entry) +} + +// deleteAndCleanup performs actions required at the time of deleting an entry +// from the data cache. +// - the entry is removed from the map of entries +// - current size of the data cache is update +// - the key is removed from the LRU +func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) { + delete(dc.entries, key) + dc.currentSize -= entry.size + dc.keys.removeEntry(key) +} + +func (dc *dataCache) stop() { + for key, entry := range dc.entries { + dc.deleteAndcleanup(key, entry) + } + dc.shutdown.Fire() +} diff --git a/balancer/rls/cache_test.go b/balancer/rls/cache_test.go new file mode 100644 index 000000000000..80185f39c929 --- /dev/null +++ b/balancer/rls/cache_test.go @@ -0,0 +1,243 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/backoff" +) + +var ( + cacheKeys = []cacheKey{ + {path: "0", keys: "a"}, + {path: "1", keys: "b"}, + {path: "2", keys: "c"}, + {path: "3", keys: "d"}, + {path: "4", keys: "e"}, + } + + longDuration = 10 * time.Minute + shortDuration = 1 * time.Millisecond + cacheEntries []*cacheEntry +) + +func initCacheEntries() { + // All entries have a dummy size of 1 to simplify resize operations. + cacheEntries = []*cacheEntry{ + { + // Entry is valid and minimum expiry time has not expired. + expiryTime: time.Now().Add(longDuration), + earliestEvictTime: time.Now().Add(longDuration), + size: 1, + }, + { + // Entry is valid and is in backoff. + expiryTime: time.Now().Add(longDuration), + backoffTime: time.Now().Add(longDuration), + backoffState: &backoffState{timer: time.NewTimer(longDuration)}, + size: 1, + }, + { + // Entry is valid, and not in backoff. + expiryTime: time.Now().Add(longDuration), + size: 1, + }, + { + // Entry is invalid. + expiryTime: time.Time{}.Add(shortDuration), + size: 1, + }, + { + // Entry is invalid valid and backoff has expired. + expiryTime: time.Time{}.Add(shortDuration), + backoffExpiryTime: time.Time{}.Add(shortDuration), + size: 1, + }, + } +} + +func (s) TestLRU_BasicOperations(t *testing.T) { + initCacheEntries() + // Create an LRU and add some entries to it. 
+ lru := newLRU() + for _, k := range cacheKeys { + lru.addEntry(k) + } + + // Get the least recent entry. This should be the first entry we added. + if got, want := lru.getLeastRecentlyUsed(), cacheKeys[0]; got != want { + t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want) + } + + // Iterate through the slice of keys we added earlier, making them the most + // recent entry, one at a time. The least recent entry at that point should + // be the next entry from our slice of keys. + for i, k := range cacheKeys { + lru.makeRecent(k) + + lruIndex := (i + 1) % len(cacheKeys) + if got, want := lru.getLeastRecentlyUsed(), cacheKeys[lruIndex]; got != want { + t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want) + } + } + + // Iterate through the slice of keys we added earlier, removing them one at + // a time The least recent entry at that point should be the next entry from + // our slice of keys, except for the last one because the lru will be empty. + for i, k := range cacheKeys { + lru.removeEntry(k) + + var want cacheKey + if i < len(cacheKeys)-1 { + want = cacheKeys[i+1] + } + if got := lru.getLeastRecentlyUsed(); got != want { + t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want) + } + } +} + +func (s) TestDataCache_BasicOperations(t *testing.T) { + initCacheEntries() + dc := newDataCache(5, nil) + for i, k := range cacheKeys { + dc.addEntry(k, cacheEntries[i]) + } + for i, k := range cacheKeys { + entry := dc.getEntry(k) + if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) { + t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", k, entry, cacheEntries[i]) + } + } +} + +func (s) TestDataCache_AddForcesResize(t *testing.T) { + initCacheEntries() + dc := newDataCache(1, nil) + + // The first entry in cacheEntries has a minimum expiry time in the future. 
+ // This entry would stop the resize operation since we do not evict entries + // whose minimum expiration time is in the future. So, we do not use that + // entry in this test. The entry being added has a running backoff timer. + evicted, ok := dc.addEntry(cacheKeys[1], cacheEntries[1]) + if evicted || !ok { + t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", evicted, ok) + } + + // Add another entry leading to the eviction of the above entry which has a + // running backoff timer. The first return value is expected to be true. + backoffCancelled, ok := dc.addEntry(cacheKeys[2], cacheEntries[2]) + if !backoffCancelled || !ok { + t.Fatalf("dataCache.addEntry() returned (%v, %v) want (true, true)", backoffCancelled, ok) + } + + // Add another entry leading to the eviction of the above entry which does not + // have a running backoff timer. This should evict the above entry, but the + // first return value is expected to be false. + backoffCancelled, ok = dc.addEntry(cacheKeys[3], cacheEntries[3]) + if backoffCancelled || !ok { + t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", backoffCancelled, ok) + } +} + +func (s) TestDataCache_Resize(t *testing.T) { + initCacheEntries() + dc := newDataCache(5, nil) + for i, k := range cacheKeys { + dc.addEntry(k, cacheEntries[i]) + } + + // The first cache entry (with a key of cacheKeys[0]) that we added has an + // earliestEvictTime in the future. As part of the resize operation, we + // traverse the cache in least recently used order, and this will be first + // entry that we will encounter. And since the earliestEvictTime is in the + // future, the resize operation will stop, leaving the cache bigger than + // what was asked for. 
+ if dc.resize(1) { + t.Fatalf("dataCache.resize() returned true, want false") + } + if dc.currentSize != 5 { + t.Fatalf("dataCache.size is %d, want 5", dc.currentSize) + } + + // Remove the entry with earliestEvictTime in the future and retry the + // resize operation. + dc.removeEntryForTesting(cacheKeys[0]) + if !dc.resize(1) { + t.Fatalf("dataCache.resize() returned false, want true") + } + if dc.currentSize != 1 { + t.Fatalf("dataCache.size is %d, want 1", dc.currentSize) + } +} + +func (s) TestDataCache_EvictExpiredEntries(t *testing.T) { + initCacheEntries() + dc := newDataCache(5, nil) + for i, k := range cacheKeys { + dc.addEntry(k, cacheEntries[i]) + } + + // The last two entries in the cacheEntries list have expired, and will be + // evicted. The first three should still remain in the cache. + if !dc.evictExpiredEntries() { + t.Fatal("dataCache.evictExpiredEntries() returned false, want true") + } + if dc.currentSize != 3 { + t.Fatalf("dataCache.size is %d, want 3", dc.currentSize) + } + for i := 0; i < 3; i++ { + entry := dc.getEntry(cacheKeys[i]) + if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) { + t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", cacheKeys[i], entry, cacheEntries[i]) + } + } +} + +func (s) TestDataCache_ResetBackoffState(t *testing.T) { + type fakeBackoff struct { + backoff.Strategy + } + + initCacheEntries() + dc := newDataCache(5, nil) + for i, k := range cacheKeys { + dc.addEntry(k, cacheEntries[i]) + } + + newBackoffState := &backoffState{bs: &fakeBackoff{}} + if updatePicker := dc.resetBackoffState(newBackoffState); !updatePicker { + t.Fatal("dataCache.resetBackoffState() returned updatePicker is false, want true") + } + + // Make sure that the entry with no backoff state was not touched. 
+ if entry := dc.getEntry(cacheKeys[0]); cmp.Equal(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})) { + t.Fatal("dataCache.resetBackoffState() touched entries without a valid backoffState") + } + + // Make sure that the entry with a valid backoff state was reset. + entry := dc.getEntry(cacheKeys[1]) + if diff := cmp.Diff(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})); diff != "" { + t.Fatalf("unexpected diff in backoffState for cache entry after dataCache.resetBackoffState(): %s", diff) + } +} diff --git a/balancer/rls/child_policy.go b/balancer/rls/child_policy.go new file mode 100644 index 000000000000..c74184cac238 --- /dev/null +++ b/balancer/rls/child_policy.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "fmt" + "sync/atomic" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +// childPolicyWrapper is a reference counted wrapper around a child policy. +// +// The LB policy maintains a map of these wrappers keyed by the target returned +// by RLS. When a target is seen for the first time, a child policy wrapper is +// created for it and the wrapper is added to the child policy map. 
Each entry +// in the data cache holds references to the corresponding child policy +// wrappers. The LB policy also holds a reference to the child policy wrapper +// for the default target specified in the LB Policy Configuration +// +// When a cache entry is evicted, it releases references to the child policy +// wrappers that it contains. When all references have been released, the +// wrapper is removed from the child policy map and is destroyed. +// +// The child policy wrapper also caches the connectivity state and most recent +// picker from the child policy. Once the child policy wrapper reports +// TRANSIENT_FAILURE, it will continue reporting that state until it goes READY; +// transitions from TRANSIENT_FAILURE to CONNECTING are ignored. +// +// Whenever a child policy wrapper changes its connectivity state, the LB policy +// returns a new picker to the channel, since the channel may need to re-process +// the picks for queued RPCs. +// +// It is not safe for concurrent access. +type childPolicyWrapper struct { + logger *internalgrpclog.PrefixLogger + target string // RLS target corresponding to this child policy. + refCnt int // Reference count. + + // Balancer state reported by the child policy. The RLS LB policy maintains + // these child policies in a BalancerGroup. The state reported by the child + // policy is pushed to the state aggregator (which is also implemented by the + // RLS LB policy) and cached here. See handleChildPolicyStateUpdate() for + // details on how the state aggregation is performed. + // + // While this field is written to by the LB policy, it is read by the picker + // at Pick time. Making this an atomic to enable the picker to read this value + // without a mutex. + state unsafe.Pointer // *balancer.State +} + +// newChildPolicyWrapper creates a child policy wrapper for the given target, +// and is initialized with one reference and starts off in CONNECTING state. 
+func newChildPolicyWrapper(target string) *childPolicyWrapper { + c := &childPolicyWrapper{ + target: target, + refCnt: 1, + state: unsafe.Pointer(&balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }), + } + c.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-child-policy-wrapper %s %p] ", c.target, c)) + c.logger.Infof("Created") + return c +} + +// acquireRef increments the reference count on the child policy wrapper. +func (c *childPolicyWrapper) acquireRef() { + c.refCnt++ +} + +// releaseRef decrements the reference count on the child policy wrapper. The +// return value indicates whether the released reference was the last one. +func (c *childPolicyWrapper) releaseRef() bool { + c.refCnt-- + return c.refCnt == 0 +} + +// lamify causes the child policy wrapper to return a picker which will always +// fail requests. This is used when the wrapper runs into errors when trying to +// build and parse the child policy configuration. +func (c *childPolicyWrapper) lamify(err error) { + c.logger.Warningf("Entering lame mode: %v", err) + atomic.StorePointer(&c.state, unsafe.Pointer(&balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + })) +} diff --git a/balancer/rls/config.go b/balancer/rls/config.go new file mode 100644 index 000000000000..77b6bdcd1cca --- /dev/null +++ b/balancer/rls/config.go @@ -0,0 +1,312 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "bytes" + "encoding/json" + "fmt" + "net/url" + "time" + + "github.com/golang/protobuf/ptypes" + durationpb "github.com/golang/protobuf/ptypes/duration" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/pretty" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/protobuf/encoding/protojson" +) + +const ( + // Default max_age if not specified (or greater than this value) in the + // service config. + maxMaxAge = 5 * time.Minute + // Upper limit for cache_size since we don't fully trust the service config. + maxCacheSize = 5 * 1024 * 1024 * 8 // 5MB in bytes + // Default lookup_service_timeout if not specified in the service config. + defaultLookupServiceTimeout = 10 * time.Second + // Default value for targetNameField in the child policy config during + // service config validation. + dummyChildPolicyTarget = "target_name_to_be_filled_in_later" +) + +// lbConfig is the internal representation of the RLS LB policy's config. +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + cacheSizeBytes int64 // Keep this field 64-bit aligned. 
+ kbMap keys.BuilderMap + lookupService string + lookupServiceTimeout time.Duration + maxAge time.Duration + staleAge time.Duration + defaultTarget string + + childPolicyName string + childPolicyConfig map[string]json.RawMessage + childPolicyTargetField string + controlChannelServiceConfig string +} + +func (lbCfg *lbConfig) Equal(other *lbConfig) bool { + return lbCfg.kbMap.Equal(other.kbMap) && + lbCfg.lookupService == other.lookupService && + lbCfg.lookupServiceTimeout == other.lookupServiceTimeout && + lbCfg.maxAge == other.maxAge && + lbCfg.staleAge == other.staleAge && + lbCfg.cacheSizeBytes == other.cacheSizeBytes && + lbCfg.defaultTarget == other.defaultTarget && + lbCfg.childPolicyName == other.childPolicyName && + lbCfg.childPolicyTargetField == other.childPolicyTargetField && + lbCfg.controlChannelServiceConfig == other.controlChannelServiceConfig && + childPolicyConfigEqual(lbCfg.childPolicyConfig, other.childPolicyConfig) +} + +func childPolicyConfigEqual(a, b map[string]json.RawMessage) bool { + if (b == nil) != (a == nil) { + return false + } + if len(b) != len(a) { + return false + } + for k, jsonA := range a { + jsonB, ok := b[k] + if !ok { + return false + } + if !bytes.Equal(jsonA, jsonB) { + return false + } + } + return true +} + +// This struct resembles the JSON representation of the loadBalancing config +// and makes it easier to unmarshal. +type lbConfigJSON struct { + RouteLookupConfig json.RawMessage + RouteLookupChannelServiceConfig json.RawMessage + ChildPolicy []map[string]json.RawMessage + ChildPolicyConfigTargetFieldName string +} + +// ParseConfig parses the JSON load balancer config provided into an +// internal form or returns an error if the config is invalid. 
+// +// When parsing a config update, the following validations are performed: +// - routeLookupConfig: +// - grpc_keybuilders field: +// - must have at least one entry +// - must not have two entries with the same `Name` +// - within each entry: +// - must have at least one `Name` +// - must not have a `Name` with the `service` field unset or empty +// - within each `headers` entry: +// - must not have `required_match` set +// - must not have `key` unset or empty +// - across all `headers`, `constant_keys` and `extra_keys` fields: +// - must not have the same `key` specified twice +// - no `key` must be the empty string +// - `lookup_service` field must be set and must parse as a target URI +// - if `max_age` > 5m, it should be set to 5 minutes +// - if `stale_age` > `max_age`, ignore it +// - if `stale_age` is set, then `max_age` must also be set +// - ignore `valid_targets` field +// - `cache_size_bytes` field must have a value greater than 0, and if its +// value is greater than 5M, we cap it at 5M +// +// - routeLookupChannelServiceConfig: +// - if specified, must parse as valid service config +// +// - childPolicy: +// - must find a valid child policy with a valid config +// +// - childPolicyConfigTargetFieldName: +// - must be set and non-empty +func (rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + logger.Infof("Received JSON service config: %v", pretty.ToJSON(c)) + cfgJSON := &lbConfigJSON{} + if err := json.Unmarshal(c, cfgJSON); err != nil { + return nil, fmt.Errorf("rls: json unmarshal failed for service config %+v: %v", string(c), err) + } + + m := protojson.UnmarshalOptions{DiscardUnknown: true} + rlsProto := &rlspb.RouteLookupConfig{} + if err := m.Unmarshal(cfgJSON.RouteLookupConfig, rlsProto); err != nil { + return nil, fmt.Errorf("rls: bad RouteLookupConfig proto %+v: %v", string(cfgJSON.RouteLookupConfig), err) + } + lbCfg, err := parseRLSProto(rlsProto) + if err != nil { + return nil, err + } + + if sc := 
string(cfgJSON.RouteLookupChannelServiceConfig); sc != "" { + parsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(sc) + if parsed.Err != nil { + return nil, fmt.Errorf("rls: bad control channel service config %q: %v", sc, parsed.Err) + } + lbCfg.controlChannelServiceConfig = sc + } + + if cfgJSON.ChildPolicyConfigTargetFieldName == "" { + return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config %+v", string(c)) + } + name, config, err := parseChildPolicyConfigs(cfgJSON.ChildPolicy, cfgJSON.ChildPolicyConfigTargetFieldName) + if err != nil { + return nil, err + } + lbCfg.childPolicyName = name + lbCfg.childPolicyConfig = config + lbCfg.childPolicyTargetField = cfgJSON.ChildPolicyConfigTargetFieldName + return lbCfg, nil +} + +func parseRLSProto(rlsProto *rlspb.RouteLookupConfig) (*lbConfig, error) { + // Validations specified on the `grpc_keybuilders` field are performed here. + kbMap, err := keys.MakeBuilderMap(rlsProto) + if err != nil { + return nil, err + } + + // `lookup_service` field must be set and must parse as a target URI. + lookupService := rlsProto.GetLookupService() + if lookupService == "" { + return nil, fmt.Errorf("rls: empty lookup_service in route lookup config %+v", rlsProto) + } + parsedTarget, err := url.Parse(lookupService) + if err != nil { + // url.Parse() fails if scheme is missing. Retry with default scheme. 
+ parsedTarget, err = url.Parse(resolver.GetDefaultScheme() + ":///" + lookupService) + if err != nil { + return nil, fmt.Errorf("rls: invalid target URI in lookup_service %s", lookupService) + } + } + if parsedTarget.Scheme == "" { + parsedTarget.Scheme = resolver.GetDefaultScheme() + } + if resolver.Get(parsedTarget.Scheme) == nil { + return nil, fmt.Errorf("rls: unregistered scheme in lookup_service %s", lookupService) + } + + lookupServiceTimeout, err := convertDuration(rlsProto.GetLookupServiceTimeout()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse lookup_service_timeout in route lookup config %+v: %v", rlsProto, err) + } + if lookupServiceTimeout == 0 { + lookupServiceTimeout = defaultLookupServiceTimeout + } + + // Validations performed here: + // - if `max_age` > 5m, it should be set to 5 minutes + // - if `stale_age` > `max_age`, ignore it + // - if `stale_age` is set, then `max_age` must also be set + maxAge, err := convertDuration(rlsProto.GetMaxAge()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse max_age in route lookup config %+v: %v", rlsProto, err) + } + staleAge, err := convertDuration(rlsProto.GetStaleAge()) + if err != nil { + return nil, fmt.Errorf("rls: failed to parse staleAge in route lookup config %+v: %v", rlsProto, err) + } + if staleAge != 0 && maxAge == 0 { + return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in route lookup config %+v", rlsProto) + } + if staleAge >= maxAge { + logger.Infof("rls: stale_age %v is not less than max_age %v, ignoring it", staleAge, maxAge) + staleAge = 0 + } + if maxAge == 0 || maxAge > maxMaxAge { + logger.Infof("rls: max_age in route lookup config is %v, using %v", maxAge, maxMaxAge) + maxAge = maxMaxAge + } + + // `cache_size_bytes` field must have a value greater than 0, and if its + // value is greater than 5M, we cap it at 5M + cacheSizeBytes := rlsProto.GetCacheSizeBytes() + if cacheSizeBytes <= 0 { + return nil, fmt.Errorf("rls: cache_size_bytes 
must be set to a non-zero value: %+v", rlsProto) + } + if cacheSizeBytes > maxCacheSize { + logger.Info("rls: cache_size_bytes %v is too large, setting it to: %v", cacheSizeBytes, maxCacheSize) + cacheSizeBytes = maxCacheSize + } + return &lbConfig{ + kbMap: kbMap, + lookupService: lookupService, + lookupServiceTimeout: lookupServiceTimeout, + maxAge: maxAge, + staleAge: staleAge, + cacheSizeBytes: cacheSizeBytes, + defaultTarget: rlsProto.GetDefaultTarget(), + }, nil +} + +// parseChildPolicyConfigs iterates through the list of child policies and picks +// the first registered policy and validates its config. +func parseChildPolicyConfigs(childPolicies []map[string]json.RawMessage, targetFieldName string) (string, map[string]json.RawMessage, error) { + for i, config := range childPolicies { + if len(config) != 1 { + return "", nil, fmt.Errorf("rls: invalid childPolicy: entry %v does not contain exactly 1 policy/config pair: %q", i, config) + } + + var name string + var rawCfg json.RawMessage + for name, rawCfg = range config { + } + builder := balancer.Get(name) + if builder == nil { + continue + } + parser, ok := builder.(balancer.ConfigParser) + if !ok { + return "", nil, fmt.Errorf("rls: childPolicy %q with config %q does not support config parsing", name, string(rawCfg)) + } + + // To validate child policy configs we do the following: + // - unmarshal the raw JSON bytes of the child policy config into a map + // - add an entry with key set to `target_field_name` and a dummy value + // - marshal the map back to JSON and parse the config using the parser + // retrieved previously + var childConfig map[string]json.RawMessage + if err := json.Unmarshal(rawCfg, &childConfig); err != nil { + return "", nil, fmt.Errorf("rls: json unmarshal failed for child policy config %q: %v", string(rawCfg), err) + } + childConfig[targetFieldName], _ = json.Marshal(dummyChildPolicyTarget) + jsonCfg, err := json.Marshal(childConfig) + if err != nil { + return "", nil, 
fmt.Errorf("rls: json marshal failed for child policy config {%+v}: %v", childConfig, err) + } + if _, err := parser.ParseConfig(jsonCfg); err != nil { + return "", nil, fmt.Errorf("rls: childPolicy config validation failed: %v", err) + } + return name, childConfig, nil + } + return "", nil, fmt.Errorf("rls: invalid childPolicy config: no supported policies found in %+v", childPolicies) +} + +func convertDuration(d *durationpb.Duration) (time.Duration, error) { + if d == nil { + return 0, nil + } + return ptypes.Duration(d) +} diff --git a/balancer/rls/internal/config_test.go b/balancer/rls/config_test.go similarity index 60% rename from balancer/rls/internal/config_test.go rename to balancer/rls/config_test.go index 1efd054512b2..86cfcad74935 100644 --- a/balancer/rls/internal/config_test.go +++ b/balancer/rls/config_test.go @@ -25,27 +25,10 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" - - "google.golang.org/grpc/balancer" _ "google.golang.org/grpc/balancer/grpclb" // grpclb for config parsing. _ "google.golang.org/grpc/internal/resolver/passthrough" // passthrough resolver. ) -const balancerWithoutConfigParserName = "dummy_balancer" - -type dummyBB struct { - balancer.Builder -} - -func (*dummyBB) Name() string { - return balancerWithoutConfigParserName -} - -func init() { - balancer.Register(&dummyBB{}) -} - // testEqual reports whether the lbCfgs a and b are equal. This is to be used // only from tests. 
This ignores the keyBuilderMap field because its internals // are not exported, and hence not possible to specify in the want section of @@ -58,25 +41,29 @@ func testEqual(a, b *lbConfig) bool { a.staleAge == b.staleAge && a.cacheSizeBytes == b.cacheSizeBytes && a.defaultTarget == b.defaultTarget && - a.cpName == b.cpName && - a.cpTargetField == b.cpTargetField && - cmp.Equal(a.cpConfig, b.cpConfig) + a.controlChannelServiceConfig == b.controlChannelServiceConfig && + a.childPolicyName == b.childPolicyName && + a.childPolicyTargetField == b.childPolicyTargetField && + childPolicyConfigEqual(a.childPolicyConfig, b.childPolicyConfig) } -func TestParseConfig(t *testing.T) { +// TestParseConfig verifies successful config parsing scenarios. +func (s) TestParseConfig(t *testing.T) { + childPolicyTargetFieldVal, _ := json.Marshal(dummyChildPolicyTarget) tests := []struct { desc string input []byte wantCfg *lbConfig }{ - // This input validates a few cases: - // - A top-level unknown field should not fail. - // - An unknown field in routeLookupConfig proto should not fail. - // - lookupServiceTimeout is set to its default value, since it is not specified in the input. - // - maxAge is set to maxMaxAge since the value is too large in the input. - // - staleAge is ignore because it is higher than maxAge in the input. { - desc: "with transformations", + // This input validates a few cases: + // - A top-level unknown field should not fail. + // - An unknown field in routeLookupConfig proto should not fail. + // - lookupServiceTimeout is set to its default value, since it is not specified in the input. + // - maxAge is set to maxMaxAge since the value is too large in the input. + // - staleAge is ignore because it is higher than maxAge in the input. 
+ // - cacheSizeBytes is greater than the hard upper limit of 5MB + desc: "with transformations 1", input: []byte(`{ "top-level-unknown-field": "unknown-value", "routeLookupConfig": { @@ -85,10 +72,10 @@ func TestParseConfig(t *testing.T) { "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], - "lookupService": "passthrough:///target", + "lookupService": ":///target", "maxAge" : "500s", "staleAge": "600s", - "cacheSizeBytes": 1000, + "cacheSizeBytes": 100000000, "defaultTarget": "passthrough:///default" }, "childPolicy": [ @@ -96,18 +83,21 @@ func TestParseConfig(t *testing.T) { {"unknown-policy": {"unknown-field": "unknown-value"}}, {"grpclb": {"childPolicy": [{"pickfirst": {}}]}} ], - "childPolicyConfigTargetFieldName": "service_name" + "childPolicyConfigTargetFieldName": "serviceName" }`), wantCfg: &lbConfig{ - lookupService: "passthrough:///target", - lookupServiceTimeout: 10 * time.Second, // This is the default value. - maxAge: 5 * time.Minute, // This is max maxAge. - staleAge: time.Duration(0), // StaleAge is ignore because it was higher than maxAge. - cacheSizeBytes: 1000, - defaultTarget: "passthrough:///default", - cpName: "grpclb", - cpTargetField: "service_name", - cpConfig: map[string]json.RawMessage{"childPolicy": json.RawMessage(`[{"pickfirst": {}}]`)}, + lookupService: ":///target", + lookupServiceTimeout: 10 * time.Second, // This is the default value. + maxAge: 5 * time.Minute, // This is max maxAge. + staleAge: time.Duration(0), // StaleAge is ignore because it was higher than maxAge. 
+ cacheSizeBytes: maxCacheSize, + defaultTarget: "passthrough:///default", + childPolicyName: "grpclb", + childPolicyTargetField: "serviceName", + childPolicyConfig: map[string]json.RawMessage{ + "childPolicy": json.RawMessage(`[{"pickfirst": {}}]`), + "serviceName": json.RawMessage(childPolicyTargetFieldVal), + }, }, }, { @@ -118,31 +108,36 @@ func TestParseConfig(t *testing.T) { "names": [{"service": "service", "method": "method"}], "headers": [{"key": "k1", "names": ["v1"]}] }], - "lookupService": "passthrough:///target", + "lookupService": "target", "lookupServiceTimeout" : "100s", "maxAge": "60s", "staleAge" : "50s", "cacheSizeBytes": 1000, "defaultTarget": "passthrough:///default" }, + "routeLookupChannelServiceConfig": {"loadBalancingConfig": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}]}, "childPolicy": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}], - "childPolicyConfigTargetFieldName": "service_name" + "childPolicyConfigTargetFieldName": "serviceName" }`), wantCfg: &lbConfig{ - lookupService: "passthrough:///target", - lookupServiceTimeout: 100 * time.Second, - maxAge: 60 * time.Second, - staleAge: 50 * time.Second, - cacheSizeBytes: 1000, - defaultTarget: "passthrough:///default", - cpName: "grpclb", - cpTargetField: "service_name", - cpConfig: map[string]json.RawMessage{"childPolicy": json.RawMessage(`[{"pickfirst": {}}]`)}, + lookupService: "target", + lookupServiceTimeout: 100 * time.Second, + maxAge: 60 * time.Second, + staleAge: 50 * time.Second, + cacheSizeBytes: 1000, + defaultTarget: "passthrough:///default", + controlChannelServiceConfig: `{"loadBalancingConfig": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}]}`, + childPolicyName: "grpclb", + childPolicyTargetField: "serviceName", + childPolicyConfig: map[string]json.RawMessage{ + "childPolicy": json.RawMessage(`[{"pickfirst": {}}]`), + "serviceName": json.RawMessage(childPolicyTargetFieldVal), + }, }, }, } - builder := &rlsBB{} + builder := rlsBB{} for _, test := range tests { 
t.Run(test.desc, func(t *testing.T) { lbCfg, err := builder.ParseConfig(test.input) @@ -153,7 +148,8 @@ func TestParseConfig(t *testing.T) { } } -func TestParseConfigErrors(t *testing.T) { +// TestParseConfigErrors verifies config parsing failure scenarios. +func (s) TestParseConfigErrors(t *testing.T) { tests := []struct { desc string input []byte @@ -191,10 +187,10 @@ func TestParseConfigErrors(t *testing.T) { }] } }`), - wantErr: "rls: empty lookup_service in service config", + wantErr: "rls: empty lookup_service in route lookup config", }, { - desc: "invalid lookup service URI", + desc: "unregistered scheme in lookup service URI", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ @@ -204,7 +200,7 @@ func TestParseConfigErrors(t *testing.T) { "lookupService": "badScheme:///target" } }`), - wantErr: "rls: invalid target URI in lookup_service", + wantErr: "rls: unregistered scheme in lookup_service", }, { desc: "invalid lookup service timeout", @@ -218,7 +214,7 @@ func TestParseConfigErrors(t *testing.T) { "lookupServiceTimeout" : "315576000001s" } }`), - wantErr: "bad Duration: time: invalid duration", + wantErr: "google.protobuf.Duration value out of range", }, { desc: "invalid max age", @@ -233,7 +229,7 @@ func TestParseConfigErrors(t *testing.T) { "maxAge" : "315576000001s" } }`), - wantErr: "bad Duration: time: invalid duration", + wantErr: "google.protobuf.Duration value out of range", }, { desc: "invalid stale age", @@ -249,7 +245,7 @@ func TestParseConfigErrors(t *testing.T) { "staleAge" : "315576000001s" } }`), - wantErr: "bad Duration: time: invalid duration", + wantErr: "google.protobuf.Duration value out of range", }, { desc: "invalid max age stale age combo", @@ -264,10 +260,10 @@ func TestParseConfigErrors(t *testing.T) { "staleAge" : "10s" } }`), - wantErr: "rls: stale_age is set, but max_age is not in service config", + wantErr: "rls: stale_age is set, but max_age is not in route lookup config", }, { - desc: "invalid cache size", + 
desc: "cache_size_bytes field is not set", input: []byte(`{ "routeLookupConfig": { "grpcKeybuilders": [{ @@ -277,10 +273,56 @@ func TestParseConfigErrors(t *testing.T) { "lookupService": "passthrough:///target", "lookupServiceTimeout" : "10s", "maxAge": "30s", - "staleAge" : "25s" - } + "staleAge" : "25s", + "defaultTarget": "passthrough:///default" + }, + "childPolicyConfigTargetFieldName": "serviceName" + }`), + wantErr: "rls: cache_size_bytes must be set to a non-zero value", + }, + { + desc: "routeLookupChannelServiceConfig is not in service config format", + input: []byte(`{ + "routeLookupConfig": { + "grpcKeybuilders": [{ + "names": [{"service": "service", "method": "method"}], + "headers": [{"key": "k1", "names": ["v1"]}] + }], + "lookupService": "target", + "lookupServiceTimeout" : "100s", + "maxAge": "60s", + "staleAge" : "50s", + "cacheSizeBytes": 1000, + "defaultTarget": "passthrough:///default" + }, + "routeLookupChannelServiceConfig": "unknown", + "childPolicy": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}], + "childPolicyConfigTargetFieldName": "serviceName" + }`), + wantErr: "cannot unmarshal string into Go value of type grpc.jsonSC", + }, + { + desc: "routeLookupChannelServiceConfig contains unknown LB policy", + input: []byte(`{ + "routeLookupConfig": { + "grpcKeybuilders": [{ + "names": [{"service": "service", "method": "method"}], + "headers": [{"key": "k1", "names": ["v1"]}] + }], + "lookupService": "target", + "lookupServiceTimeout" : "100s", + "maxAge": "60s", + "staleAge" : "50s", + "cacheSizeBytes": 1000, + "defaultTarget": "passthrough:///default" + }, + "routeLookupChannelServiceConfig": { + "loadBalancingConfig": [{"not_a_balancer1": {} }, {"not_a_balancer2": {}}] + }, + "childPolicy": [{"grpclb": {"childPolicy": [{"pickfirst": {}}]}}], + "childPolicyConfigTargetFieldName": "serviceName" }`), - wantErr: "rls: cache_size_bytes must be greater than 0 in service config", + wantErr: "invalid loadBalancingConfig: no supported policies 
found", }, { desc: "no child policy", @@ -296,9 +338,10 @@ func TestParseConfigErrors(t *testing.T) { "staleAge" : "25s", "cacheSizeBytes": 1000, "defaultTarget": "passthrough:///default" - } + }, + "childPolicyConfigTargetFieldName": "serviceName" }`), - wantErr: "rls: childPolicy is invalid in service config", + wantErr: "rls: invalid childPolicy config: no supported policies found", }, { desc: "no known child policy", @@ -318,9 +361,35 @@ func TestParseConfigErrors(t *testing.T) { "childPolicy": [ {"cds_experimental": {"Cluster": "my-fav-cluster"}}, {"unknown-policy": {"unknown-field": "unknown-value"}} - ] + ], + "childPolicyConfigTargetFieldName": "serviceName" }`), - wantErr: "rls: childPolicy is invalid in service config", + wantErr: "rls: invalid childPolicy config: no supported policies found", + }, + { + desc: "invalid child policy config - more than one entry in map", + input: []byte(`{ + "routeLookupConfig": { + "grpcKeybuilders": [{ + "names": [{"service": "service", "method": "method"}], + "headers": [{"key": "k1", "names": ["v1"]}] + }], + "lookupService": "passthrough:///target", + "lookupServiceTimeout" : "10s", + "maxAge": "30s", + "staleAge" : "25s", + "cacheSizeBytes": 1000, + "defaultTarget": "passthrough:///default" + }, + "childPolicy": [ + { + "cds_experimental": {"Cluster": "my-fav-cluster"}, + "unknown-policy": {"unknown-field": "unknown-value"} + } + ], + "childPolicyConfigTargetFieldName": "serviceName" + }`), + wantErr: "does not contain exactly 1 policy/config pair", }, { desc: "no childPolicyConfigTargetFieldName", @@ -365,13 +434,13 @@ func TestParseConfigErrors(t *testing.T) { {"unknown-policy": {"unknown-field": "unknown-value"}}, {"grpclb": {"childPolicy": "not-an-array"}} ], - "childPolicyConfigTargetFieldName": "service_name" + "childPolicyConfigTargetFieldName": "serviceName" }`), wantErr: "rls: childPolicy config validation failed", }, } - builder := &rlsBB{} + builder := rlsBB{} for _, test := range tests { t.Run(test.desc, 
func(t *testing.T) { lbCfg, err := builder.ParseConfig(test.input) @@ -381,60 +450,3 @@ func TestParseConfigErrors(t *testing.T) { }) } } - -func TestValidateChildPolicyConfig(t *testing.T) { - jsonCfg := json.RawMessage(`[{"round_robin" : {}}, {"pick_first" : {}}]`) - wantChildConfig := map[string]json.RawMessage{"childPolicy": jsonCfg} - cp := &loadBalancingConfig{ - Name: "grpclb", - Config: []byte(`{"childPolicy": [{"round_robin" : {}}, {"pick_first" : {}}]}`), - } - cpTargetField := "serviceName" - - gotChildConfig, err := validateChildPolicyConfig(cp, cpTargetField) - if err != nil || !cmp.Equal(gotChildConfig, wantChildConfig) { - t.Errorf("validateChildPolicyConfig(%v, %v) = {%v, %v}, want {%v, nil}", cp, cpTargetField, gotChildConfig, err, wantChildConfig) - } -} - -func TestValidateChildPolicyConfigErrors(t *testing.T) { - tests := []struct { - desc string - cp *loadBalancingConfig - wantErrPrefix string - }{ - { - desc: "unknown child policy", - cp: &loadBalancingConfig{ - Name: "unknown", - Config: []byte(`{}`), - }, - wantErrPrefix: "rls: balancer builder not found for child_policy", - }, - { - desc: "balancer builder does not implement ConfigParser", - cp: &loadBalancingConfig{ - Name: balancerWithoutConfigParserName, - Config: []byte(`{}`), - }, - wantErrPrefix: "rls: balancer builder for child_policy does not implement balancer.ConfigParser", - }, - { - desc: "child policy config parsing failure", - cp: &loadBalancingConfig{ - Name: "grpclb", - Config: []byte(`{"childPolicy": "not-an-array"}`), - }, - wantErrPrefix: "rls: childPolicy config validation failed", - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - gotChildConfig, gotErr := validateChildPolicyConfig(test.cp, "") - if gotChildConfig != nil || !strings.HasPrefix(fmt.Sprint(gotErr), test.wantErrPrefix) { - t.Errorf("validateChildPolicyConfig(%v) = {%v, %v}, want {nil, %v}", test.cp, gotChildConfig, gotErr, test.wantErrPrefix) - } - }) - } -} diff --git 
a/balancer/rls/control_channel.go b/balancer/rls/control_channel.go new file mode 100644 index 000000000000..4acc11d90e94 --- /dev/null +++ b/balancer/rls/control_channel.go @@ -0,0 +1,220 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/adaptive" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" +) + +var newAdaptiveThrottler = func() adaptiveThrottler { return adaptive.New() } + +type adaptiveThrottler interface { + ShouldThrottle() bool + RegisterBackendResponse(throttled bool) +} + +// controlChannel is a wrapper around the gRPC channel to the RLS server +// specified in the service config. +type controlChannel struct { + // rpcTimeout specifies the timeout for the RouteLookup RPC call. The LB + // policy receives this value in its service config. + rpcTimeout time.Duration + // backToReadyFunc is a callback to be invoked when the connectivity state + // changes from READY --> TRANSIENT_FAILURE --> READY. 
+ backToReadyFunc func() + // throttler in an adaptive throttling implementation used to avoid + // hammering the RLS service while it is overloaded or down. + throttler adaptiveThrottler + + cc *grpc.ClientConn + client rlsgrpc.RouteLookupServiceClient + logger *internalgrpclog.PrefixLogger +} + +// newControlChannel creates a controlChannel to rlsServerName and uses +// serviceConfig, if non-empty, as the default service config for the underlying +// gRPC channel. +func newControlChannel(rlsServerName, serviceConfig string, rpcTimeout time.Duration, bOpts balancer.BuildOptions, backToReadyFunc func()) (*controlChannel, error) { + ctrlCh := &controlChannel{ + rpcTimeout: rpcTimeout, + backToReadyFunc: backToReadyFunc, + throttler: newAdaptiveThrottler(), + } + ctrlCh.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-control-channel %p] ", ctrlCh)) + + dopts, err := ctrlCh.dialOpts(bOpts, serviceConfig) + if err != nil { + return nil, err + } + ctrlCh.cc, err = grpc.Dial(rlsServerName, dopts...) + if err != nil { + return nil, err + } + ctrlCh.client = rlsgrpc.NewRouteLookupServiceClient(ctrlCh.cc) + ctrlCh.logger.Infof("Control channel created to RLS server at: %v", rlsServerName) + + go ctrlCh.monitorConnectivityState() + return ctrlCh, nil +} + +// dialOpts constructs the dial options for the control plane channel. +func (cc *controlChannel) dialOpts(bOpts balancer.BuildOptions, serviceConfig string) ([]grpc.DialOption, error) { + // The control plane channel will use the same authority as the parent + // channel for server authorization. This ensures that the identity of the + // RLS server and the identity of the backends is the same, so if the RLS + // config is injected by an attacker, it cannot cause leakage of private + // information contained in headers set by the application. 
+ dopts := []grpc.DialOption{grpc.WithAuthority(bOpts.Authority)} + if bOpts.Dialer != nil { + dopts = append(dopts, grpc.WithContextDialer(bOpts.Dialer)) + } + + // The control channel will use the channel credentials from the parent + // channel, including any call creds associated with the channel creds. + var credsOpt grpc.DialOption + switch { + case bOpts.DialCreds != nil: + credsOpt = grpc.WithTransportCredentials(bOpts.DialCreds.Clone()) + case bOpts.CredsBundle != nil: + // The "fallback" mode in google default credentials (which is the only + // type of credentials we expect to be used with RLS) uses TLS/ALTS + // creds for transport and uses the same call creds as that on the + // parent bundle. + bundle, err := bOpts.CredsBundle.NewWithMode(internal.CredsBundleModeFallback) + if err != nil { + return nil, err + } + credsOpt = grpc.WithCredentialsBundle(bundle) + default: + cc.logger.Warningf("no credentials available, using Insecure") + credsOpt = grpc.WithTransportCredentials(insecure.NewCredentials()) + } + dopts = append(dopts, credsOpt) + + // If the RLS LB policy's configuration specified a service config for the + // control channel, use that and disable service config fetching via the name + // resolver for the control channel. + if serviceConfig != "" { + cc.logger.Infof("Disabling service config from the name resolver and instead using: %s", serviceConfig) + dopts = append(dopts, grpc.WithDisableServiceConfig(), grpc.WithDefaultServiceConfig(serviceConfig)) + } + + return dopts, nil +} + +func (cc *controlChannel) monitorConnectivityState() { + cc.logger.Infof("Starting connectivity state monitoring goroutine") + // Since we use two mechanisms to deal with RLS server being down: + // - adaptive throttling for the channel as a whole + // - exponential backoff on a per-request basis + // we need a way to avoid double-penalizing requests by counting failures + // toward both mechanisms when the RLS server is unreachable. 
+ // + // To accomplish this, we monitor the state of the control plane channel. If + // the state has been TRANSIENT_FAILURE since the last time it was in state + // READY, and it then transitions into state READY, we push on a channel + // which is being read by the LB policy. + // + // The LB the policy will iterate through the cache to reset the backoff + // timeouts in all cache entries. Specifically, this means that it will + // reset the backoff state and cancel the pending backoff timer. Note that + // when cancelling the backoff timer, just like when the backoff timer fires + // normally, a new picker is returned to the channel, to force it to + // re-process any wait-for-ready RPCs that may still be queued if we failed + // them while we were in backoff. However, we should optimize this case by + // returning only one new picker, regardless of how many backoff timers are + // cancelled. + + // Using the background context is fine here since we check for the ClientConn + // entering SHUTDOWN and return early in that case. + ctx := context.Background() + + first := true + for { + // Wait for the control channel to become READY. + for s := cc.cc.GetState(); s != connectivity.Ready; s = cc.cc.GetState() { + if s == connectivity.Shutdown { + return + } + cc.cc.WaitForStateChange(ctx, s) + } + cc.logger.Infof("Connectivity state is READY") + + if !first { + cc.logger.Infof("Control channel back to READY") + cc.backToReadyFunc() + } + first = false + + // Wait for the control channel to move out of READY. 
+ cc.cc.WaitForStateChange(ctx, connectivity.Ready) + if cc.cc.GetState() == connectivity.Shutdown { + return + } + cc.logger.Infof("Connectivity state is %s", cc.cc.GetState()) + } +} + +func (cc *controlChannel) close() { + cc.logger.Infof("Closing control channel") + cc.cc.Close() +} + +type lookupCallback func(targets []string, headerData string, err error) + +// lookup starts a RouteLookup RPC in a separate goroutine and returns the +// results (and error, if any) in the provided callback. +// +// The returned boolean indicates whether the request was throttled by the +// client-side adaptive throttling algorithm in which case the provided callback +// will not be invoked. +func (cc *controlChannel) lookup(reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string, cb lookupCallback) (throttled bool) { + if cc.throttler.ShouldThrottle() { + cc.logger.Infof("RLS request throttled by client-side adaptive throttling") + return true + } + go func() { + req := &rlspb.RouteLookupRequest{ + TargetType: "grpc", + KeyMap: reqKeys, + Reason: reason, + StaleHeaderData: staleHeaders, + } + cc.logger.Infof("Sending RLS request %+v", pretty.ToJSON(req)) + + ctx, cancel := context.WithTimeout(context.Background(), cc.rpcTimeout) + defer cancel() + resp, err := cc.client.RouteLookup(ctx, req) + cb(resp.GetTargets(), resp.GetHeaderData(), err) + }() + return false +} diff --git a/balancer/rls/control_channel_test.go b/balancer/rls/control_channel_test.go new file mode 100644 index 000000000000..d401d4cb6de8 --- /dev/null +++ b/balancer/rls/control_channel_test.go @@ -0,0 +1,465 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "os" + "regexp" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlstest "google.golang.org/grpc/internal/testutils/rls" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/grpc/testdata" + "google.golang.org/protobuf/proto" +) + +// TestControlChannelThrottled tests the case where the adaptive throttler +// indicates that the control channel needs to be throttled. +func (s) TestControlChannelThrottled(t *testing.T) { + // Start an RLS server and set the throttler to always throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) + + // Create a control channel to the fake RLS server. + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, balancer.BuildOptions{}, nil) + if err != nil { + t.Fatalf("Failed to create control channel to RLS server: %v", err) + } + defer ctrlCh.close() + + // Perform the lookup and expect the attempt to be throttled. 
+ ctrlCh.lookup(nil, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, nil) + + select { + case <-rlsReqCh: + t.Fatal("RouteLookup RPC invoked when control channel is throtlled") + case <-time.After(defaultTestShortTimeout): + } +} + +// TestLookupFailure tests the case where the RLS server responds with an error. +func (s) TestLookupFailure(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Setup the RLS server to respond with errors. + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Err: errors.New("rls failure")} + }) + + // Create a control channel to the fake RLS server. + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, balancer.BuildOptions{}, nil) + if err != nil { + t.Fatalf("Failed to create control channel to RLS server: %v", err) + } + defer ctrlCh.close() + + // Perform the lookup and expect the callback to be invoked with an error. + errCh := make(chan error, 1) + ctrlCh.lookup(nil, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, func(_ []string, _ string, err error) { + if err == nil { + errCh <- errors.New("rlsClient.lookup() succeeded, should have failed") + return + } + errCh <- nil + }) + + select { + case <-time.After(defaultTestTimeout): + t.Fatal("timeout when waiting for lookup callback to be invoked") + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } +} + +// TestLookupDeadlineExceeded tests the case where the RLS server does not +// respond within the configured rpc timeout. +func (s) TestLookupDeadlineExceeded(t *testing.T) { + // A unary interceptor which returns a status error with DeadlineExceeded. 
+ interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + return nil, status.Error(codes.DeadlineExceeded, "deadline exceeded") + } + + // Start an RLS server and set the throttler to never throttle. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Create a control channel with a small deadline. + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestShortTimeout, balancer.BuildOptions{}, nil) + if err != nil { + t.Fatalf("Failed to create control channel to RLS server: %v", err) + } + defer ctrlCh.close() + + // Perform the lookup and expect the callback to be invoked with an error. + errCh := make(chan error) + ctrlCh.lookup(nil, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, func(_ []string, _ string, err error) { + if st, ok := status.FromError(err); !ok || st.Code() != codes.DeadlineExceeded { + errCh <- fmt.Errorf("rlsClient.lookup() returned error: %v, want %v", err, codes.DeadlineExceeded) + return + } + errCh <- nil + }) + + select { + case <-time.After(defaultTestTimeout): + t.Fatal("timeout when waiting for lookup callback to be invoked") + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } +} + +// testCredsBundle wraps a test call creds and real transport creds. 
+type testCredsBundle struct { + transportCreds credentials.TransportCredentials + callCreds credentials.PerRPCCredentials +} + +func (f *testCredsBundle) TransportCredentials() credentials.TransportCredentials { + return f.transportCreds +} + +func (f *testCredsBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return f.callCreds +} + +func (f *testCredsBundle) NewWithMode(mode string) (credentials.Bundle, error) { + if mode != internal.CredsBundleModeFallback { + return nil, fmt.Errorf("unsupported mode: %v", mode) + } + return &testCredsBundle{ + transportCreds: f.transportCreds, + callCreds: f.callCreds, + }, nil +} + +var ( + // Call creds sent by the testPerRPCCredentials on the client, and verified + // by an interceptor on the server. + perRPCCredsData = map[string]string{ + "test-key": "test-value", + "test-key-bin": string([]byte{1, 2, 3}), + } +) + +type testPerRPCCredentials struct { + callCreds map[string]string +} + +func (f *testPerRPCCredentials) GetRequestMetadata(context.Context, ...string) (map[string]string, error) { + return f.callCreds, nil +} + +func (f *testPerRPCCredentials) RequireTransportSecurity() bool { + return true +} + +// Unary server interceptor which validates if the RPC contains call credentials +// which match `perRPCCredsData +func callCredsValidatingServerInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Error(codes.PermissionDenied, "didn't find metadata in context") + } + for k, want := range perRPCCredsData { + got, ok := md[k] + if !ok { + return ctx, status.Errorf(codes.PermissionDenied, "didn't find call creds key %v in context", k) + } + if got[0] != want { + return ctx, status.Errorf(codes.PermissionDenied, "for key %v, got value %v, want %v", k, got, want) + } + } + return handler(ctx, req) +} + +// makeTLSCreds is a test helper which creates a 
TLS based transport credentials +// from files specified in the arguments. +func makeTLSCreds(t *testing.T, certPath, keyPath, rootsPath string) credentials.TransportCredentials { + cert, err := tls.LoadX509KeyPair(testdata.Path(certPath), testdata.Path(keyPath)) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(%q, %q) failed: %v", certPath, keyPath, err) + } + b, err := os.ReadFile(testdata.Path(rootsPath)) + if err != nil { + t.Fatalf("os.ReadFile(%q) failed: %v", rootsPath, err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(b) { + t.Fatal("failed to append certificates") + } + return credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + }) +} + +const ( + wantHeaderData = "headerData" + staleHeaderData = "staleHeaderData" +) + +var ( + keyMap = map[string]string{ + "k1": "v1", + "k2": "v2", + } + wantTargets = []string{"us_east_1.firestore.googleapis.com"} + lookupRequest = &rlspb.RouteLookupRequest{ + TargetType: "grpc", + KeyMap: keyMap, + Reason: rlspb.RouteLookupRequest_REASON_MISS, + StaleHeaderData: staleHeaderData, + } + lookupResponse = &rlstest.RouteLookupResponse{ + Resp: &rlspb.RouteLookupResponse{ + Targets: wantTargets, + HeaderData: wantHeaderData, + }, + } +) + +func testControlChannelCredsSuccess(t *testing.T, sopts []grpc.ServerOption, bopts balancer.BuildOptions) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, sopts...) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Setup the RLS server to respond with a valid response. + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return lookupResponse + }) + + // Verify that the request received by the RLS matches the expected one. 
+ rlsServer.SetRequestCallback(func(got *rlspb.RouteLookupRequest) { + if diff := cmp.Diff(lookupRequest, got, cmp.Comparer(proto.Equal)); diff != "" { + t.Errorf("RouteLookupRequest diff (-want, +got):\n%s", diff) + } + }) + + // Create a control channel to the fake server. + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, bopts, nil) + if err != nil { + t.Fatalf("Failed to create control channel to RLS server: %v", err) + } + defer ctrlCh.close() + + // Perform the lookup and expect a successful callback invocation. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + errCh := make(chan error, 1) + ctrlCh.lookup(keyMap, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, func(targets []string, headerData string, err error) { + if err != nil { + errCh <- fmt.Errorf("rlsClient.lookup() failed with err: %v", err) + return + } + if !cmp.Equal(targets, wantTargets) || headerData != wantHeaderData { + errCh <- fmt.Errorf("rlsClient.lookup() = (%v, %s), want (%v, %s)", targets, headerData, wantTargets, wantHeaderData) + return + } + errCh <- nil + }) + + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for lookup callback to be invoked") + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } +} + +// TestControlChannelCredsSuccess tests creation of the control channel with +// different credentials, which are expected to succeed. 
+func (s) TestControlChannelCredsSuccess(t *testing.T) { + serverCreds := makeTLSCreds(t, "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + clientCreds := makeTLSCreds(t, "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") + + tests := []struct { + name string + sopts []grpc.ServerOption + bopts balancer.BuildOptions + }{ + { + name: "insecure", + sopts: nil, + bopts: balancer.BuildOptions{}, + }, + { + name: "transport creds only", + sopts: []grpc.ServerOption{grpc.Creds(serverCreds)}, + bopts: balancer.BuildOptions{ + DialCreds: clientCreds, + Authority: "x.test.example.com", + }, + }, + { + name: "creds bundle", + sopts: []grpc.ServerOption{ + grpc.Creds(serverCreds), + grpc.UnaryInterceptor(callCredsValidatingServerInterceptor), + }, + bopts: balancer.BuildOptions{ + CredsBundle: &testCredsBundle{ + transportCreds: clientCreds, + callCreds: &testPerRPCCredentials{callCreds: perRPCCredsData}, + }, + Authority: "x.test.example.com", + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testControlChannelCredsSuccess(t, test.sopts, test.bopts) + }) + } +} + +func testControlChannelCredsFailure(t *testing.T, sopts []grpc.ServerOption, bopts balancer.BuildOptions, wantCode codes.Code, wantErrRegex *regexp.Regexp) { + // Start an RLS server and set the throttler to never throttle requests. The + // creds failures happen before the RPC handler on the server is invoked. + // So, there is no need to set up the request and responses on the fake server. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, sopts...) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Create the control channel to the fake server. 
+ ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, bopts, nil) + if err != nil { + t.Fatalf("Failed to create control channel to RLS server: %v", err) + } + defer ctrlCh.close() + + // Perform the lookup and expect the callback to be invoked with an error. + errCh := make(chan error) + ctrlCh.lookup(nil, rlspb.RouteLookupRequest_REASON_MISS, staleHeaderData, func(_ []string, _ string, err error) { + if st, ok := status.FromError(err); !ok || st.Code() != wantCode || !wantErrRegex.MatchString(st.String()) { + errCh <- fmt.Errorf("rlsClient.lookup() returned error: %v, wantCode: %v, wantErr: %s", err, wantCode, wantErrRegex.String()) + return + } + errCh <- nil + }) + + select { + case <-time.After(defaultTestTimeout): + t.Fatal("timeout when waiting for lookup callback to be invoked") + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } +} + +// TestControlChannelCredsFailure tests creation of the control channel with +// different credentials, which are expected to fail. 
+func (s) TestControlChannelCredsFailure(t *testing.T) { + serverCreds := makeTLSCreds(t, "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + clientCreds := makeTLSCreds(t, "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") + + tests := []struct { + name string + sopts []grpc.ServerOption + bopts balancer.BuildOptions + wantCode codes.Code + wantErrRegex *regexp.Regexp + }{ + { + name: "transport creds authority mismatch", + sopts: []grpc.ServerOption{grpc.Creds(serverCreds)}, + bopts: balancer.BuildOptions{ + DialCreds: clientCreds, + Authority: "authority-mismatch", + }, + wantCode: codes.Unavailable, + wantErrRegex: regexp.MustCompile(`transport: authentication handshake failed: .* \*\.test\.example\.com.*authority-mismatch`), + }, + { + name: "transport creds handshake failure", + sopts: nil, // server expects insecure connection + bopts: balancer.BuildOptions{ + DialCreds: clientCreds, + Authority: "x.test.example.com", + }, + wantCode: codes.Unavailable, + wantErrRegex: regexp.MustCompile("transport: authentication handshake failed: .*"), + }, + { + name: "call creds mismatch", + sopts: []grpc.ServerOption{ + grpc.Creds(serverCreds), + grpc.UnaryInterceptor(callCredsValidatingServerInterceptor), // server expects call creds + }, + bopts: balancer.BuildOptions{ + CredsBundle: &testCredsBundle{ + transportCreds: clientCreds, + callCreds: &testPerRPCCredentials{}, // sends no call creds + }, + Authority: "x.test.example.com", + }, + wantCode: codes.PermissionDenied, + wantErrRegex: regexp.MustCompile("didn't find call creds"), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testControlChannelCredsFailure(t, test.sopts, test.bopts, test.wantCode, test.wantErrRegex) + }) + } +} + +type unsupportedCredsBundle struct { + credentials.Bundle +} + +func (*unsupportedCredsBundle) NewWithMode(mode string) (credentials.Bundle, error) { + return nil, fmt.Errorf("unsupported mode: %v", 
mode) +} + +// TestNewControlChannelUnsupportedCredsBundle tests the case where the control +// channel is configured with a bundle which does not support the mode we use. +func (s) TestNewControlChannelUnsupportedCredsBundle(t *testing.T) { + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) + + // Create the control channel to the fake server. + ctrlCh, err := newControlChannel(rlsServer.Address, "", defaultTestTimeout, balancer.BuildOptions{CredsBundle: &unsupportedCredsBundle{}}, nil) + if err == nil { + ctrlCh.close() + t.Fatal("newControlChannel succeeded when expected to fail") + } +} diff --git a/balancer/rls/helpers_test.go b/balancer/rls/helpers_test.go new file mode 100644 index 000000000000..9d2385efd658 --- /dev/null +++ b/balancer/rls/helpers_test.go @@ -0,0 +1,304 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package rls + +import ( + "context" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer/rls/internal/test/e2e" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpctest" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/stubserver" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 100 * time.Millisecond +) + +func init() { + balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// fakeBackoffStrategy is a fake implementation of the backoff.Strategy +// interface, for tests to inject the backoff duration. +type fakeBackoffStrategy struct { + backoff time.Duration +} + +func (f *fakeBackoffStrategy) Backoff(retries int) time.Duration { + return f.backoff +} + +// fakeThrottler is a fake implementation of the adaptiveThrottler interface. +type fakeThrottler struct { + throttleFunc func() bool // Fake throttler implementation. + throttleCh chan struct{} // Invocation of ShouldThrottle signals here. 
+} + +func (f *fakeThrottler) ShouldThrottle() bool { + select { + case <-f.throttleCh: + default: + } + f.throttleCh <- struct{}{} + + return f.throttleFunc() +} + +func (f *fakeThrottler) RegisterBackendResponse(bool) {} + +// alwaysThrottlingThrottler returns a fake throttler which always throttles. +func alwaysThrottlingThrottler() *fakeThrottler { + return &fakeThrottler{ + throttleFunc: func() bool { return true }, + throttleCh: make(chan struct{}, 1), + } +} + +// neverThrottlingThrottler returns a fake throttler which never throttles. +func neverThrottlingThrottler() *fakeThrottler { + return &fakeThrottler{ + throttleFunc: func() bool { return false }, + throttleCh: make(chan struct{}, 1), + } +} + +// oneTimeAllowingThrottler returns a fake throttler which does not throttle +// requests until the client RPC succeeds, but throttles everything that comes +// after. This is useful for tests which need to set up a valid cache entry +// before testing other cases. +func oneTimeAllowingThrottler(firstRPCDone *grpcsync.Event) *fakeThrottler { + return &fakeThrottler{ + throttleFunc: firstRPCDone.HasFired, + throttleCh: make(chan struct{}, 1), + } +} + +func overrideAdaptiveThrottler(t *testing.T, f *fakeThrottler) { + origAdaptiveThrottler := newAdaptiveThrottler + newAdaptiveThrottler = func() adaptiveThrottler { return f } + t.Cleanup(func() { newAdaptiveThrottler = origAdaptiveThrottler }) +} + +// buildBasicRLSConfig constructs a basic service config for the RLS LB policy +// with header matching rules. This expects the passed child policy name to +// have been registered by the caller. 
+func buildBasicRLSConfig(childPolicyName, rlsServerAddress string) *e2e.RLSConfig { + return &e2e.RLSConfig{ + RouteLookupConfig: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "grpc.testing.TestService"}}, + Headers: []*rlspb.NameMatcher{ + {Key: "k1", Names: []string{"n1"}}, + {Key: "k2", Names: []string{"n2"}}, + }, + }, + }, + LookupService: rlsServerAddress, + LookupServiceTimeout: durationpb.New(defaultTestTimeout), + CacheSizeBytes: 1024, + }, + RouteLookupChannelServiceConfig: `{"loadBalancingConfig": [{"pick_first": {}}]}`, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, + ChildPolicyConfigTargetFieldName: e2e.RLSChildPolicyTargetNameField, + } +} + +// buildBasicRLSConfigWithChildPolicy constructs a very basic service config for +// the RLS LB policy. It also registers a test LB policy which is capable of +// being a child of the RLS LB policy. +func buildBasicRLSConfigWithChildPolicy(t *testing.T, childPolicyName, rlsServerAddress string) *e2e.RLSConfig { + childPolicyName = "test-child-policy" + childPolicyName + e2e.RegisterRLSChildPolicy(childPolicyName, nil) + t.Logf("Registered child policy with name %q", childPolicyName) + + return &e2e.RLSConfig{ + RouteLookupConfig: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{{Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "grpc.testing.TestService"}}}}, + LookupService: rlsServerAddress, + LookupServiceTimeout: durationpb.New(defaultTestTimeout), + CacheSizeBytes: 1024, + }, + RouteLookupChannelServiceConfig: `{"loadBalancingConfig": [{"pick_first": {}}]}`, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, + ChildPolicyConfigTargetFieldName: e2e.RLSChildPolicyTargetNameField, + } +} + +// startBackend starts a backend implementing the TestService on a local port. +// It returns a channel for tests to get notified whenever an RPC is invoked on +// the backend. 
This allows tests to ensure that RPCs reach expected backends. +// Also returns the address of the backend. +func startBackend(t *testing.T, sopts ...grpc.ServerOption) (rpcCh chan struct{}, address string) { + t.Helper() + + rpcCh = make(chan struct{}, 1) + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + select { + case rpcCh <- struct{}{}: + default: + } + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(sopts...); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + t.Cleanup(func() { backend.Stop() }) + return rpcCh, backend.Address +} + +// startManualResolverWithConfig registers and returns a manual resolver which +// pushes the RLS LB policy's service config on the channel. +func startManualResolverWithConfig(t *testing.T, rlsConfig *e2e.RLSConfig) *manual.Resolver { + t.Helper() + + scJSON, err := rlsConfig.ServiceConfigJSON() + if err != nil { + t.Fatal(err) + } + + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(scJSON) + r := manual.NewBuilderWithScheme("rls-e2e") + r.InitialState(resolver.State{ServiceConfig: sc}) + t.Cleanup(r.Close) + return r +} + +// makeTestRPCAndExpectItToReachBackend is a test helper function which makes +// the EmptyCall RPC on the given ClientConn and verifies that it reaches a +// backend. The latter is accomplished by listening on the provided channel +// which gets pushed to whenever the backend in question gets an RPC. +// +// There are many instances where it can take a while before the attempted RPC +// reaches the expected backend. Examples include, but are not limited to: +// - control channel is changed in a config update. The RLS LB policy creates a +// new control channel, and sends a new picker to gRPC. But it takes a while +// before gRPC actually starts using the new picker. 
+// - test is waiting for a cache entry to expire after which we expect a +// different behavior because we have configured the fake RLS server to return +// different backends. +// +// Therefore, we do not return an error when the RPC fails. Instead, we wait for +// the context to expire before failing. +func makeTestRPCAndExpectItToReachBackend(ctx context.Context, t *testing.T, cc *grpc.ClientConn, ch chan struct{}) { + t.Helper() + + // Drain the backend channel before performing the RPC to remove any + // notifications from previous RPCs. + select { + case <-ch: + default: + } + + for { + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout when waiting for RPCs to be routed to the given target: %v", err) + } + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + client := testgrpc.NewTestServiceClient(cc) + client.EmptyCall(sCtx, &testpb.Empty{}) + + select { + case <-sCtx.Done(): + case <-ch: + sCancel() + return + } + } +} + +// makeTestRPCAndVerifyError is a test helper function which makes the EmptyCall +// RPC on the given ClientConn and verifies that the RPC fails with the given +// status code and error. +// +// Similar to makeTestRPCAndExpectItToReachBackend, retries until expected +// outcome is reached or the provided context has expired. +func makeTestRPCAndVerifyError(ctx context.Context, t *testing.T, cc *grpc.ClientConn, wantCode codes.Code, wantErr error) { + t.Helper() + + for { + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout when waiting for RPCs to fail with given error: %v", err) + } + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + client := testgrpc.NewTestServiceClient(cc) + _, err := client.EmptyCall(sCtx, &testpb.Empty{}) + + // If the RPC fails with the expected code and expected error message (if + // one was provided), we return. Else we retry after blocking for a little + // while to ensure that we don't keep blasting away with RPCs. 
+ if code := status.Code(err); code == wantCode { + if wantErr == nil || strings.Contains(err.Error(), wantErr.Error()) { + sCancel() + return + } + } + <-sCtx.Done() + } +} + +// verifyRLSRequest is a test helper which listens on a channel to see if an RLS +// request was received by the fake RLS server. Based on whether the test +// expects a request to be sent out or not, it uses a different timeout. +func verifyRLSRequest(t *testing.T, ch chan struct{}, wantRequest bool) { + t.Helper() + + if wantRequest { + select { + case <-time.After(defaultTestTimeout): + t.Fatalf("Timeout when waiting for an RLS request to be sent out") + case <-ch: + } + } else { + select { + case <-time.After(defaultTestShortTimeout): + case <-ch: + t.Fatalf("RLS request sent out when not expecting one") + } + } +} diff --git a/balancer/rls/internal/adaptive/adaptive.go b/balancer/rls/internal/adaptive/adaptive.go index 4adae1bde6b4..a3b0931b2955 100644 --- a/balancer/rls/internal/adaptive/adaptive.go +++ b/balancer/rls/internal/adaptive/adaptive.go @@ -45,21 +45,21 @@ const ( // The throttler has the following knobs for which we will use defaults for // now. If there is a need to make them configurable at a later point in time, // support for the same will be added. -// * Duration: amount of recent history that will be taken into account for -// making client-side throttling decisions. A default of 30 seconds is used. -// * Bins: number of bins to be used for bucketing historical data. A default -// of 100 is used. -// * RatioForAccepts: ratio by which accepts are multiplied, typically a value -// slightly larger than 1.0. This is used to make the throttler behave as if -// the backend had accepted more requests than it actually has, which lets us -// err on the side of sending to the backend more requests than we think it -// will accept for the sake of speeding up the propagation of state. A -// default of 2.0 is used. 
-// * RequestsPadding: is used to decrease the (client-side) throttling -// probability in the low QPS regime (to speed up propagation of state), as -// well as to safeguard against hitting a client-side throttling probability -// of 100%. The weight of this value decreases as the number of requests in -// recent history grows. A default of 8 is used. +// - Duration: amount of recent history that will be taken into account for +// making client-side throttling decisions. A default of 30 seconds is used. +// - Bins: number of bins to be used for bucketing historical data. A default +// of 100 is used. +// - RatioForAccepts: ratio by which accepts are multiplied, typically a value +// slightly larger than 1.0. This is used to make the throttler behave as if +// the backend had accepted more requests than it actually has, which lets us +// err on the side of sending to the backend more requests than we think it +// will accept for the sake of speeding up the propagation of state. A +// default of 2.0 is used. +// - RequestsPadding: is used to decrease the (client-side) throttling +// probability in the low QPS regime (to speed up propagation of state), as +// well as to safeguard against hitting a client-side throttling probability +// of 100%. The weight of this value decreases as the number of requests in +// recent history grows. A default of 8 is used. // // The adaptive throttler attempts to estimate the probability that a request // will be throttled using recent history. 
Server requests (both throttled and diff --git a/balancer/rls/internal/adaptive/adaptive_test.go b/balancer/rls/internal/adaptive/adaptive_test.go index 40a846a388a4..2205b533eec7 100644 --- a/balancer/rls/internal/adaptive/adaptive_test.go +++ b/balancer/rls/internal/adaptive/adaptive_test.go @@ -156,7 +156,7 @@ func TestShouldThrottleOptions(t *testing.T) { for _, test := range testcases { t.Run(test.desc, func(t *testing.T) { m.SetNanos(0) - th := newWithArgs(time.Duration(time.Nanosecond), 1, test.ratioForAccepts, test.requestsPadding) + th := newWithArgs(time.Nanosecond, 1, test.ratioForAccepts, test.requestsPadding) for i, response := range responses { if response != E { th.RegisterBackendResponse(response == T) diff --git a/balancer/rls/internal/balancer.go b/balancer/rls/internal/balancer.go deleted file mode 100644 index 7af97b76faf1..000000000000 --- a/balancer/rls/internal/balancer.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package rls - -import ( - "sync" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcsync" -) - -var ( - _ balancer.Balancer = (*rlsBalancer)(nil) - - // For overriding in tests. - newRLSClientFunc = newRLSClient - logger = grpclog.Component("rls") -) - -// rlsBalancer implements the RLS LB policy. 
-type rlsBalancer struct { - done *grpcsync.Event - cc balancer.ClientConn - opts balancer.BuildOptions - - // Mutex protects all the state maintained by the LB policy. - // TODO(easwars): Once we add the cache, we will also have another lock for - // the cache alone. - mu sync.Mutex - lbCfg *lbConfig // Most recently received service config. - rlsCC *grpc.ClientConn // ClientConn to the RLS server. - rlsC *rlsClient // RLS client wrapper. - - ccUpdateCh chan *balancer.ClientConnState -} - -// run is a long running goroutine which handles all the updates that the -// balancer wishes to handle. The appropriate updateHandler will push the update -// on to a channel that this goroutine will select on, thereby the handling of -// the update will happen asynchronously. -func (lb *rlsBalancer) run() { - for { - // TODO(easwars): Handle other updates like subConn state changes, RLS - // responses from the server etc. - select { - case u := <-lb.ccUpdateCh: - lb.handleClientConnUpdate(u) - case <-lb.done.Done(): - return - } - } -} - -// handleClientConnUpdate handles updates to the service config. -// If the RLS server name or the RLS RPC timeout changes, it updates the control -// channel accordingly. -// TODO(easwars): Handle updates to other fields in the service config. -func (lb *rlsBalancer) handleClientConnUpdate(ccs *balancer.ClientConnState) { - logger.Infof("rls: service config: %+v", ccs.BalancerConfig) - lb.mu.Lock() - defer lb.mu.Unlock() - - if lb.done.HasFired() { - logger.Warning("rls: received service config after balancer close") - return - } - - newCfg := ccs.BalancerConfig.(*lbConfig) - if lb.lbCfg.Equal(newCfg) { - logger.Info("rls: new service config matches existing config") - return - } - - lb.updateControlChannel(newCfg) - lb.lbCfg = newCfg -} - -// UpdateClientConnState pushes the received ClientConnState update on the -// update channel which will be processed asynchronously by the run goroutine. -// Implements balancer.Balancer interface. 
-func (lb *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - select { - case lb.ccUpdateCh <- &ccs: - case <-lb.done.Done(): - } - return nil -} - -// ResolverErr implements balancer.Balancer interface. -func (lb *rlsBalancer) ResolverError(error) { - // ResolverError is called by gRPC when the name resolver reports an error. - // TODO(easwars): How do we handle this? - logger.Fatal("rls: ResolverError is not yet unimplemented") -} - -// UpdateSubConnState implements balancer.Balancer interface. -func (lb *rlsBalancer) UpdateSubConnState(_ balancer.SubConn, _ balancer.SubConnState) { - logger.Fatal("rls: UpdateSubConnState is not yet implemented") -} - -// Cleans up the resources allocated by the LB policy including the clientConn -// to the RLS server. -// Implements balancer.Balancer. -func (lb *rlsBalancer) Close() { - lb.mu.Lock() - defer lb.mu.Unlock() - - lb.done.Fire() - if lb.rlsCC != nil { - lb.rlsCC.Close() - } -} - -// updateControlChannel updates the RLS client if required. -// Caller must hold lb.mu. -func (lb *rlsBalancer) updateControlChannel(newCfg *lbConfig) { - oldCfg := lb.lbCfg - if newCfg.lookupService == oldCfg.lookupService && newCfg.lookupServiceTimeout == oldCfg.lookupServiceTimeout { - return - } - - // Use RPC timeout from new config, if different from existing one. - timeout := oldCfg.lookupServiceTimeout - if timeout != newCfg.lookupServiceTimeout { - timeout = newCfg.lookupServiceTimeout - } - - if newCfg.lookupService == oldCfg.lookupService { - // This is the case where only the timeout has changed. We will continue - // to use the existing clientConn. but will create a new rlsClient with - // the new timeout. - lb.rlsC = newRLSClientFunc(lb.rlsCC, lb.opts.Target.Endpoint, timeout) - return - } - - // This is the case where the RLS server name has changed. We need to create - // a new clientConn and close the old one. 
- var dopts []grpc.DialOption - if dialer := lb.opts.Dialer; dialer != nil { - dopts = append(dopts, grpc.WithContextDialer(dialer)) - } - dopts = append(dopts, dialCreds(lb.opts)) - - cc, err := grpc.Dial(newCfg.lookupService, dopts...) - if err != nil { - logger.Errorf("rls: dialRLS(%s, %v): %v", newCfg.lookupService, lb.opts, err) - // An error from a non-blocking dial indicates something serious. We - // should continue to use the old control channel if one exists, and - // return so that the rest of the config updates can be processes. - return - } - if lb.rlsCC != nil { - lb.rlsCC.Close() - } - lb.rlsCC = cc - lb.rlsC = newRLSClientFunc(cc, lb.opts.Target.Endpoint, timeout) -} - -func dialCreds(opts balancer.BuildOptions) grpc.DialOption { - // The control channel should use the same authority as that of the parent - // channel. This ensures that the identify of the RLS server and that of the - // backend is the same, so if the RLS config is injected by an attacker, it - // cannot cause leakage of private information contained in headers set by - // the application. - server := opts.Target.Authority - switch { - case opts.DialCreds != nil: - if err := opts.DialCreds.OverrideServerName(server); err != nil { - logger.Warningf("rls: OverrideServerName(%s) = (%v), using Insecure", server, err) - return grpc.WithInsecure() - } - return grpc.WithTransportCredentials(opts.DialCreds) - case opts.CredsBundle != nil: - return grpc.WithTransportCredentials(opts.CredsBundle.TransportCredentials()) - default: - logger.Warning("rls: no credentials available, using Insecure") - return grpc.WithInsecure() - } -} diff --git a/balancer/rls/internal/balancer_test.go b/balancer/rls/internal/balancer_test.go deleted file mode 100644 index 2378a86fff10..000000000000 --- a/balancer/rls/internal/balancer_test.go +++ /dev/null @@ -1,238 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package rls - -import ( - "context" - "net" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/rls/internal/testutils/fakeserver" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/testdata" -) - -const defaultTestTimeout = 1 * time.Second - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -type listenerWrapper struct { - net.Listener - connCh *testutils.Channel -} - -// Accept waits for and returns the next connection to the listener. -func (l *listenerWrapper) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return nil, err - } - l.connCh.Send(c) - return c, nil -} - -func setupwithListener(t *testing.T, opts ...grpc.ServerOption) (*fakeserver.Server, *listenerWrapper, func()) { - t.Helper() - - l, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("net.Listen(tcp, localhost:0): %v", err) - } - lw := &listenerWrapper{ - Listener: l, - connCh: testutils.NewChannel(), - } - - server, cleanup, err := fakeserver.Start(lw, opts...) 
- if err != nil { - t.Fatalf("fakeserver.Start(): %v", err) - } - t.Logf("Fake RLS server started at %s ...", server.Address) - - return server, lw, cleanup -} - -type testBalancerCC struct { - balancer.ClientConn -} - -// TestUpdateControlChannelFirstConfig tests the scenario where the LB policy -// receives its first service config and verifies that a control channel to the -// RLS server specified in the serviceConfig is established. -func (s) TestUpdateControlChannelFirstConfig(t *testing.T) { - server, lis, cleanup := setupwithListener(t) - defer cleanup() - - bb := balancer.Get(rlsBalancerName) - if bb == nil { - t.Fatalf("balancer.Get(%s) = nil", rlsBalancerName) - } - rlsB := bb.Build(&testBalancerCC{}, balancer.BuildOptions{}) - defer rlsB.Close() - t.Log("Built RLS LB policy ...") - - lbCfg := &lbConfig{lookupService: server.Address} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := lis.connCh.Receive(ctx); err != nil { - t.Fatal("Timeout expired when waiting for LB policy to create control channel") - } - - // TODO: Verify channel connectivity state once control channel connectivity - // state monitoring is in place. - - // TODO: Verify RLS RPC can be made once we integrate with the picker. -} - -// TestUpdateControlChannelSwitch tests the scenario where a control channel -// exists and the LB policy receives a new serviceConfig with a different RLS -// server name. Verifies that the new control channel is created and the old one -// is closed (the leakchecker takes care of this). 
-func (s) TestUpdateControlChannelSwitch(t *testing.T) { - server1, lis1, cleanup1 := setupwithListener(t) - defer cleanup1() - - server2, lis2, cleanup2 := setupwithListener(t) - defer cleanup2() - - bb := balancer.Get(rlsBalancerName) - if bb == nil { - t.Fatalf("balancer.Get(%s) = nil", rlsBalancerName) - } - rlsB := bb.Build(&testBalancerCC{}, balancer.BuildOptions{}) - defer rlsB.Close() - t.Log("Built RLS LB policy ...") - - lbCfg := &lbConfig{lookupService: server1.Address} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := lis1.connCh.Receive(ctx); err != nil { - t.Fatal("Timeout expired when waiting for LB policy to create control channel") - } - - lbCfg = &lbConfig{lookupService: server2.Address} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - - if _, err := lis2.connCh.Receive(ctx); err != nil { - t.Fatal("Timeout expired when waiting for LB policy to create control channel") - } - - // TODO: Verify channel connectivity state once control channel connectivity - // state monitoring is in place. - - // TODO: Verify RLS RPC can be made once we integrate with the picker. -} - -// TestUpdateControlChannelTimeout tests the scenario where the LB policy -// receives a service config update with a different lookupServiceTimeout, but -// the lookupService itself remains unchanged. It verifies that the LB policy -// does not create a new control channel in this case. 
-func (s) TestUpdateControlChannelTimeout(t *testing.T) { - server, lis, cleanup := setupwithListener(t) - defer cleanup() - - bb := balancer.Get(rlsBalancerName) - if bb == nil { - t.Fatalf("balancer.Get(%s) = nil", rlsBalancerName) - } - rlsB := bb.Build(&testBalancerCC{}, balancer.BuildOptions{}) - defer rlsB.Close() - t.Log("Built RLS LB policy ...") - - lbCfg := &lbConfig{lookupService: server.Address, lookupServiceTimeout: 1 * time.Second} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := lis.connCh.Receive(ctx); err != nil { - t.Fatal("Timeout expired when waiting for LB policy to create control channel") - } - - lbCfg = &lbConfig{lookupService: server.Address, lookupServiceTimeout: 2 * time.Second} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - if _, err := lis.connCh.Receive(ctx); err != context.DeadlineExceeded { - t.Fatal("LB policy created new control channel when only lookupServiceTimeout changed") - } - - // TODO: Verify channel connectivity state once control channel connectivity - // state monitoring is in place. - - // TODO: Verify RLS RPC can be made once we integrate with the picker. -} - -// TestUpdateControlChannelWithCreds tests the scenario where the control -// channel is to established with credentials from the parent channel. 
-func (s) TestUpdateControlChannelWithCreds(t *testing.T) { - sCreds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) - if err != nil { - t.Fatalf("credentials.NewServerTLSFromFile(server1.pem, server1.key) = %v", err) - } - cCreds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "") - if err != nil { - t.Fatalf("credentials.NewClientTLSFromFile(ca.pem) = %v", err) - } - - server, lis, cleanup := setupwithListener(t, grpc.Creds(sCreds)) - defer cleanup() - - bb := balancer.Get(rlsBalancerName) - if bb == nil { - t.Fatalf("balancer.Get(%s) = nil", rlsBalancerName) - } - rlsB := bb.Build(&testBalancerCC{}, balancer.BuildOptions{ - DialCreds: cCreds, - }) - defer rlsB.Close() - t.Log("Built RLS LB policy ...") - - lbCfg := &lbConfig{lookupService: server.Address} - t.Logf("Sending service config %+v to RLS LB policy ...", lbCfg) - rlsB.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: lbCfg}) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := lis.connCh.Receive(ctx); err != nil { - t.Fatal("Timeout expired when waiting for LB policy to create control channel") - } - - // TODO: Verify channel connectivity state once control channel connectivity - // state monitoring is in place. - - // TODO: Verify RLS RPC can be made once we integrate with the picker. -} diff --git a/balancer/rls/internal/builder.go b/balancer/rls/internal/builder.go deleted file mode 100644 index 7c29caef4047..000000000000 --- a/balancer/rls/internal/builder.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package rls implements the RLS LB policy. -package rls - -import ( - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/grpcsync" -) - -const rlsBalancerName = "rls" - -func init() { - balancer.Register(&rlsBB{}) -} - -// rlsBB helps build RLS load balancers and parse the service config to be -// passed to the RLS load balancer. -type rlsBB struct{} - -// Name returns the name of the RLS LB policy and helps implement the -// balancer.Balancer interface. -func (*rlsBB) Name() string { - return rlsBalancerName -} - -func (*rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - lb := &rlsBalancer{ - done: grpcsync.NewEvent(), - cc: cc, - opts: opts, - lbCfg: &lbConfig{}, - ccUpdateCh: make(chan *balancer.ClientConnState), - } - go lb.run() - return lb -} diff --git a/balancer/rls/internal/cache/cache.go b/balancer/rls/internal/cache/cache.go deleted file mode 100644 index b975c3078fdb..000000000000 --- a/balancer/rls/internal/cache/cache.go +++ /dev/null @@ -1,244 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package cache provides an LRU cache implementation to be used by the RLS LB -// policy to cache RLS response data. -package cache - -import ( - "container/list" - "sync" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/backoff" -) - -var logger = grpclog.Component("rls") - -// Key represents the cache key used to uniquely identify a cache entry. -type Key struct { - // Path is the full path of the incoming RPC request. - Path string - // KeyMap is a stringified version of the RLS request keys built using the - // RLS keyBuilder. Since map is not a Type which is comparable in Go, it - // cannot be part of the key for another map (the LRU cache is implemented - // using a native map type). - KeyMap string -} - -// Entry wraps all the data to be stored in a cache entry. -type Entry struct { - // Mu synchronizes access to this particular cache entry. The LB policy - // will also hold another mutex to synchronize access to the cache as a - // whole. To avoid holding the top-level mutex for the whole duration for - // which one particular cache entry is acted upon, we use this entry mutex. - Mu sync.Mutex - // ExpiryTime is the absolute time at which the data cached as part of this - // entry stops being valid. When an RLS request succeeds, this is set to - // the current time plus the max_age field from the LB policy config. An - // entry with this field in the past is not used to process picks. - ExpiryTime time.Time - // BackoffExpiryTime is the absolute time at which an entry which has gone - // through backoff stops being valid. When an RLS request fails, this is - // set to the current time plus twice the backoff time. The cache expiry - // timer will only delete entries for which both ExpiryTime and - // BackoffExpiryTime are in the past. 
- BackoffExpiryTime time.Time - // StaleTime is the absolute time after which this entry will be - // proactively refreshed if we receive a request for it. When an RLS - // request succeeds, this is set to the current time plus the stale_age - // from the LB policy config. - StaleTime time.Time - // BackoffTime is the absolute time at which the backoff period for this - // entry ends. The backoff timer is setup with this value. No new RLS - // requests are sent out for this entry until the backoff period ends. - BackoffTime time.Time - // EarliestEvictTime is the absolute time before which this entry should - // not be evicted from the cache. This is set to a default value of 5 - // seconds when the entry is created. This is required to make sure that a - // new entry added to the cache is not evicted before the RLS response - // arrives (usually when the cache is too small). - EarliestEvictTime time.Time - // CallStatus stores the RPC status of the previous RLS request for this - // entry. Picks for entries with a non-nil value for this field are failed - // with the error stored here. - CallStatus error - // Backoff contains all backoff related state. When an RLS request - // succeeds, backoff state is reset. - Backoff BackoffState - // HeaderData is received in an RLS response and is to be sent in the - // X-Google-RLS-Data header for matching RPCs. - HeaderData string - // ChildPicker is a very thin wrapper around the child policy wrapper. - // The type is declared as a Picker interface since the users of - // the cache only care about the picker provided by the child policy, and - // this makes it easy for testing. - ChildPicker balancer.Picker - - // size stores the size of this cache entry. Uses only a subset of the - // fields. See `entrySize` for this is computed. - size int64 - // key contains the cache key corresponding to this entry. 
This is required - // from methods like `removeElement` which only have a pointer to the - // list.Element which contains a reference to the cache.Entry. But these - // methods need the cache.Key to be able to remove the entry from the - // underlying map. - key Key -} - -// BackoffState wraps all backoff related state associated with a cache entry. -type BackoffState struct { - // Retries keeps track of the number of RLS failures, to be able to - // determine the amount of time to backoff before the next attempt. - Retries int - // Backoff is an exponential backoff implementation which returns the - // amount of time to backoff, given the number of retries. - Backoff backoff.Strategy - // Timer fires when the backoff period ends and incoming requests after - // this will trigger a new RLS request. - Timer *time.Timer - // Callback provided by the LB policy to be notified when the backoff timer - // expires. This will trigger a new picker to be returned to the - // ClientConn, to force queued up RPCs to be retried. - Callback func() -} - -// LRU is a cache with a least recently used eviction policy. It is not safe -// for concurrent access. -type LRU struct { - maxSize int64 - usedSize int64 - onEvicted func(Key, *Entry) - - ll *list.List - cache map[Key]*list.Element -} - -// NewLRU creates a cache.LRU with a size limit of maxSize and the provided -// eviction callback. -// -// Currently, only the cache.Key and the HeaderData field from cache.Entry -// count towards the size of the cache (other overhead per cache entry is not -// counted). The cache could temporarily exceed the configured maxSize because -// we want the entries to spend a configured minimum amount of time in the -// cache before they are LRU evicted (so that all the work performed in sending -// an RLS request and caching the response is not a total waste). -// -// The provided onEvited callback must not attempt to re-add the entry inline -// and the RLS LB policy does not have a need to do that. 
-// -// The cache package trusts the RLS policy (its only user) to supply a default -// minimum non-zero maxSize, in the event that the ServiceConfig does not -// provide a value for it. -func NewLRU(maxSize int64, onEvicted func(Key, *Entry)) *LRU { - return &LRU{ - maxSize: maxSize, - onEvicted: onEvicted, - ll: list.New(), - cache: make(map[Key]*list.Element), - } -} - -// Resize sets the size limit of the LRU to newMaxSize and removes older -// entries, if required, to comply with the new limit. -func (lru *LRU) Resize(newMaxSize int64) { - lru.maxSize = newMaxSize - lru.removeToFit(0) -} - -// TODO(easwars): If required, make this function more sophisticated. -func entrySize(key Key, value *Entry) int64 { - return int64(len(key.Path) + len(key.KeyMap) + len(value.HeaderData)) -} - -// removeToFit removes older entries from the cache to make room for a new -// entry of size newSize. -func (lru *LRU) removeToFit(newSize int64) { - now := time.Now() - for lru.usedSize+newSize > lru.maxSize { - elem := lru.ll.Back() - if elem == nil { - // This is a corner case where the cache is empty, but the new entry - // to be added is bigger than maxSize. - logger.Info("rls: newly added cache entry exceeds cache maxSize") - return - } - - entry := elem.Value.(*Entry) - if t := entry.EarliestEvictTime; !t.IsZero() && t.Before(now) { - // When the oldest entry is too new (it hasn't even spent a default - // minimum amount of time in the cache), we abort and allow the - // cache to grow bigger than the configured maxSize. - logger.Info("rls: LRU eviction finds oldest entry to be too new. Allowing cache to exceed maxSize momentarily") - return - } - lru.removeElement(elem) - } -} - -// Add adds a new entry to the cache. 
-func (lru *LRU) Add(key Key, value *Entry) { - size := entrySize(key, value) - elem, ok := lru.cache[key] - if !ok { - lru.removeToFit(size) - lru.usedSize += size - value.size = size - value.key = key - elem := lru.ll.PushFront(value) - lru.cache[key] = elem - return - } - - existing := elem.Value.(*Entry) - sizeDiff := size - existing.size - lru.removeToFit(sizeDiff) - value.size = size - elem.Value = value - lru.ll.MoveToFront(elem) - lru.usedSize += sizeDiff -} - -// Remove removes a cache entry wth key key, if one exists. -func (lru *LRU) Remove(key Key) { - if elem, ok := lru.cache[key]; ok { - lru.removeElement(elem) - } -} - -func (lru *LRU) removeElement(e *list.Element) { - entry := e.Value.(*Entry) - lru.ll.Remove(e) - delete(lru.cache, entry.key) - lru.usedSize -= entry.size - if lru.onEvicted != nil { - lru.onEvicted(entry.key, entry) - } -} - -// Get returns a cache entry with key key. -func (lru *LRU) Get(key Key) *Entry { - elem, ok := lru.cache[key] - if !ok { - return nil - } - lru.ll.MoveToFront(elem) - return elem.Value.(*Entry) -} diff --git a/balancer/rls/internal/cache/cache_test.go b/balancer/rls/internal/cache/cache_test.go deleted file mode 100644 index 7c480b64621e..000000000000 --- a/balancer/rls/internal/cache/cache_test.go +++ /dev/null @@ -1,262 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package cache - -import ( - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" -) - -const ( - defaultTestCacheSize = 5 - defaultTestCacheMaxSize = 1000000 - defaultTestTimeout = 1 * time.Second -) - -// TestGet verifies the Add and Get methods of cache.LRU. -func TestGet(t *testing.T) { - key1 := Key{Path: "/service1/method1", KeyMap: "k1=v1,k2=v2"} - key2 := Key{Path: "/service2/method2", KeyMap: "k1=v1,k2=v2"} - val1 := Entry{HeaderData: "h1=v1"} - val2 := Entry{HeaderData: "h2=v2"} - - tests := []struct { - desc string - keysToAdd []Key - valsToAdd []*Entry - keyToGet Key - wantEntry *Entry - }{ - { - desc: "Empty cache", - keyToGet: Key{}, - }, - { - desc: "Single entry miss", - keysToAdd: []Key{key1}, - valsToAdd: []*Entry{&val1}, - keyToGet: Key{}, - }, - { - desc: "Single entry hit", - keysToAdd: []Key{key1}, - valsToAdd: []*Entry{&val1}, - keyToGet: key1, - wantEntry: &val1, - }, - { - desc: "Multi entry miss", - keysToAdd: []Key{key1, key2}, - valsToAdd: []*Entry{&val1, &val2}, - keyToGet: Key{}, - }, - { - desc: "Multi entry hit", - keysToAdd: []Key{key1, key2}, - valsToAdd: []*Entry{&val1, &val2}, - keyToGet: key1, - wantEntry: &val1, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - lru := NewLRU(defaultTestCacheMaxSize, nil) - for i, key := range test.keysToAdd { - lru.Add(key, test.valsToAdd[i]) - } - opts := []cmp.Option{ - cmpopts.IgnoreInterfaces(struct{ sync.Locker }{}), - cmpopts.IgnoreUnexported(Entry{}), - } - if gotEntry := lru.Get(test.keyToGet); !cmp.Equal(gotEntry, test.wantEntry, opts...) { - t.Errorf("lru.Get(%+v) = %+v, want %+v", test.keyToGet, gotEntry, test.wantEntry) - } - }) - } -} - -// TestRemove verifies the Add and Remove methods of cache.LRU. 
-func TestRemove(t *testing.T) { - keys := []Key{ - {Path: "/service1/method1", KeyMap: "k1=v1,k2=v2"}, - {Path: "/service2/method2", KeyMap: "k1=v1,k2=v2"}, - {Path: "/service3/method3", KeyMap: "k1=v1,k2=v2"}, - } - - lru := NewLRU(defaultTestCacheMaxSize, nil) - for _, k := range keys { - lru.Add(k, &Entry{}) - } - for _, k := range keys { - lru.Remove(k) - if entry := lru.Get(k); entry != nil { - t.Fatalf("lru.Get(%+v) after a call to lru.Remove succeeds, should have failed", k) - } - } -} - -// TestExceedingSizeCausesEviction verifies the case where adding a new entry -// to the cache leads to eviction of old entries to make space for the new one. -func TestExceedingSizeCausesEviction(t *testing.T) { - evictCh := make(chan Key, defaultTestCacheSize) - onEvicted := func(k Key, _ *Entry) { - t.Logf("evicted key {%+v} from cache", k) - evictCh <- k - } - - keysToFill := []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}} - keysCausingEviction := []Key{{Path: "f"}, {Path: "g"}, {Path: "h"}, {Path: "i"}, {Path: "j"}} - - lru := NewLRU(defaultTestCacheSize, onEvicted) - for _, key := range keysToFill { - lru.Add(key, &Entry{}) - } - - for i, key := range keysCausingEviction { - lru.Add(key, &Entry{}) - - timer := time.NewTimer(defaultTestTimeout) - select { - case <-timer.C: - t.Fatal("Test timeout waiting for eviction") - case k := <-evictCh: - timer.Stop() - if !cmp.Equal(k, keysToFill[i]) { - t.Fatalf("Evicted key %+v, wanted %+v", k, keysToFill[i]) - } - } - } -} - -// TestAddCausesMultipleEvictions verifies the case where adding one new entry -// causes the eviction of multiple old entries to make space for the new one. 
-func TestAddCausesMultipleEvictions(t *testing.T) { - evictCh := make(chan Key, defaultTestCacheSize) - onEvicted := func(k Key, _ *Entry) { - evictCh <- k - } - - keysToFill := []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}} - keyCausingEviction := Key{Path: "abcde"} - - lru := NewLRU(defaultTestCacheSize, onEvicted) - for _, key := range keysToFill { - lru.Add(key, &Entry{}) - } - - lru.Add(keyCausingEviction, &Entry{}) - - for i := range keysToFill { - timer := time.NewTimer(defaultTestTimeout) - select { - case <-timer.C: - t.Fatal("Test timeout waiting for eviction") - case k := <-evictCh: - timer.Stop() - if !cmp.Equal(k, keysToFill[i]) { - t.Fatalf("Evicted key %+v, wanted %+v", k, keysToFill[i]) - } - } - } -} - -// TestModifyCausesMultipleEvictions verifies the case where mofiying an -// existing entry to increase its size leads to the eviction of older entries -// to make space for the new one. -func TestModifyCausesMultipleEvictions(t *testing.T) { - evictCh := make(chan Key, defaultTestCacheSize) - onEvicted := func(k Key, _ *Entry) { - evictCh <- k - } - - keysToFill := []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}} - lru := NewLRU(defaultTestCacheSize, onEvicted) - for _, key := range keysToFill { - lru.Add(key, &Entry{}) - } - - lru.Add(keysToFill[len(keysToFill)-1], &Entry{HeaderData: "xxxx"}) - for i := range keysToFill[:len(keysToFill)-1] { - timer := time.NewTimer(defaultTestTimeout) - select { - case <-timer.C: - t.Fatal("Test timeout waiting for eviction") - case k := <-evictCh: - timer.Stop() - if !cmp.Equal(k, keysToFill[i]) { - t.Fatalf("Evicted key %+v, wanted %+v", k, keysToFill[i]) - } - } - } -} - -func TestLRUResize(t *testing.T) { - tests := []struct { - desc string - maxSize int64 - keysToFill []Key - newMaxSize int64 - wantEvictedKeys []Key - }{ - { - desc: "resize causes multiple evictions", - maxSize: 5, - keysToFill: []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: 
"e"}}, - newMaxSize: 3, - wantEvictedKeys: []Key{{Path: "a"}, {Path: "b"}}, - }, - { - desc: "resize causes no evictions", - maxSize: 50, - keysToFill: []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}}, - newMaxSize: 10, - wantEvictedKeys: []Key{}, - }, - { - desc: "resize to higher value", - maxSize: 5, - keysToFill: []Key{{Path: "a"}, {Path: "b"}, {Path: "c"}, {Path: "d"}, {Path: "e"}}, - newMaxSize: 10, - wantEvictedKeys: []Key{}, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - var evictedKeys []Key - onEvicted := func(k Key, _ *Entry) { - evictedKeys = append(evictedKeys, k) - } - - lru := NewLRU(test.maxSize, onEvicted) - for _, key := range test.keysToFill { - lru.Add(key, &Entry{}) - } - lru.Resize(test.newMaxSize) - if !cmp.Equal(evictedKeys, test.wantEvictedKeys, cmpopts.EquateEmpty()) { - t.Fatalf("lru.Resize evicted keys {%v}, should have evicted {%v}", evictedKeys, test.wantEvictedKeys) - } - }) - } -} diff --git a/balancer/rls/internal/client.go b/balancer/rls/internal/client.go deleted file mode 100644 index 0e8a1c932f11..000000000000 --- a/balancer/rls/internal/client.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package rls - -import ( - "context" - "time" - - "google.golang.org/grpc" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" -) - -// For gRPC services using RLS, the value of target_type in the -// RouteLookupServiceRequest will be set to this. -const grpcTargetType = "grpc" - -// rlsClient is a simple wrapper around a RouteLookupService client which -// provides non-blocking semantics on top of a blocking unary RPC call. -// -// The RLS LB policy creates a new rlsClient object with the following values: -// * a grpc.ClientConn to the RLS server using appropriate credentials from the -// parent channel -// * dialTarget corresponding to the original user dial target, e.g. -// "firestore.googleapis.com". -// -// The RLS LB policy uses an adaptive throttler to perform client side -// throttling and asks this client to make an RPC call only after checking with -// the throttler. -type rlsClient struct { - stub rlspb.RouteLookupServiceClient - // origDialTarget is the original dial target of the user and sent in each - // RouteLookup RPC made to the RLS server. - origDialTarget string - // rpcTimeout specifies the timeout for the RouteLookup RPC call. The LB - // policy receives this value in its service config. - rpcTimeout time.Duration -} - -func newRLSClient(cc *grpc.ClientConn, dialTarget string, rpcTimeout time.Duration) *rlsClient { - return &rlsClient{ - stub: rlspb.NewRouteLookupServiceClient(cc), - origDialTarget: dialTarget, - rpcTimeout: rpcTimeout, - } -} - -type lookupCallback func(targets []string, headerData string, err error) - -// lookup starts a RouteLookup RPC in a separate goroutine and returns the -// results (and error, if any) in the provided callback. 
-func (c *rlsClient) lookup(path string, keyMap map[string]string, cb lookupCallback) { - go func() { - ctx, cancel := context.WithTimeout(context.Background(), c.rpcTimeout) - resp, err := c.stub.RouteLookup(ctx, &rlspb.RouteLookupRequest{ - Server: c.origDialTarget, - Path: path, - TargetType: grpcTargetType, - KeyMap: keyMap, - }) - cb(resp.GetTargets(), resp.GetHeaderData(), err) - cancel() - }() -} diff --git a/balancer/rls/internal/client_test.go b/balancer/rls/internal/client_test.go deleted file mode 100644 index 4bf0303a2769..000000000000 --- a/balancer/rls/internal/client_test.go +++ /dev/null @@ -1,181 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package rls - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "github.com/golang/protobuf/proto" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" - "google.golang.org/grpc/balancer/rls/internal/testutils/fakeserver" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/status" -) - -const ( - defaultDialTarget = "dummy" - defaultRPCTimeout = 5 * time.Second -) - -func setup(t *testing.T) (*fakeserver.Server, *grpc.ClientConn, func()) { - t.Helper() - - server, sCleanup, err := fakeserver.Start(nil) - if err != nil { - t.Fatalf("Failed to start fake RLS server: %v", err) - } - - cc, cCleanup, err := server.ClientConn() - if err != nil { - sCleanup() - t.Fatalf("Failed to get a ClientConn to the RLS server: %v", err) - } - - return server, cc, func() { - sCleanup() - cCleanup() - } -} - -// TestLookupFailure verifies the case where the RLS server returns an error. -func (s) TestLookupFailure(t *testing.T) { - server, cc, cleanup := setup(t) - defer cleanup() - - // We setup the fake server to return an error. 
- server.ResponseChan <- fakeserver.Response{Err: errors.New("rls failure")} - - rlsClient := newRLSClient(cc, defaultDialTarget, defaultRPCTimeout) - - errCh := testutils.NewChannel() - rlsClient.lookup("", nil, func(targets []string, headerData string, err error) { - if err == nil { - errCh.Send(errors.New("rlsClient.lookup() succeeded, should have failed")) - return - } - if len(targets) != 0 || headerData != "" { - errCh.Send(fmt.Errorf("rlsClient.lookup() = (%v, %s), want (nil, \"\")", targets, headerData)) - return - } - errCh.Send(nil) - }) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if e, err := errCh.Receive(ctx); err != nil || e != nil { - t.Fatalf("lookup error: %v, error receiving from channel: %v", e, err) - } -} - -// TestLookupDeadlineExceeded tests the case where the RPC deadline associated -// with the lookup expires. -func (s) TestLookupDeadlineExceeded(t *testing.T) { - _, cc, cleanup := setup(t) - defer cleanup() - - // Give the Lookup RPC a small deadline, but don't setup the fake server to - // return anything. So the Lookup call will block and eventually expire. - rlsClient := newRLSClient(cc, defaultDialTarget, 100*time.Millisecond) - - errCh := testutils.NewChannel() - rlsClient.lookup("", nil, func(_ []string, _ string, err error) { - if st, ok := status.FromError(err); !ok || st.Code() != codes.DeadlineExceeded { - errCh.Send(fmt.Errorf("rlsClient.lookup() returned error: %v, want %v", err, codes.DeadlineExceeded)) - return - } - errCh.Send(nil) - }) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if e, err := errCh.Receive(ctx); err != nil || e != nil { - t.Fatalf("lookup error: %v, error receiving from channel: %v", e, err) - } -} - -// TestLookupSuccess verifies the successful Lookup API case. 
-func (s) TestLookupSuccess(t *testing.T) { - server, cc, cleanup := setup(t) - defer cleanup() - - const ( - rlsReqPath = "/service/method" - wantHeaderData = "headerData" - ) - - rlsReqKeyMap := map[string]string{ - "k1": "v1", - "k2": "v2", - } - wantLookupRequest := &rlspb.RouteLookupRequest{ - Server: defaultDialTarget, - Path: rlsReqPath, - TargetType: "grpc", - KeyMap: rlsReqKeyMap, - } - wantRespTargets := []string{"us_east_1.firestore.googleapis.com"} - - rlsClient := newRLSClient(cc, defaultDialTarget, defaultRPCTimeout) - - errCh := testutils.NewChannel() - rlsClient.lookup(rlsReqPath, rlsReqKeyMap, func(targets []string, hd string, err error) { - if err != nil { - errCh.Send(fmt.Errorf("rlsClient.Lookup() failed: %v", err)) - return - } - if !cmp.Equal(targets, wantRespTargets) || hd != wantHeaderData { - errCh.Send(fmt.Errorf("rlsClient.lookup() = (%v, %s), want (%v, %s)", targets, hd, wantRespTargets, wantHeaderData)) - return - } - errCh.Send(nil) - }) - - // Make sure that the fake server received the expected RouteLookupRequest - // proto. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - req, err := server.RequestChan.Receive(ctx) - if err != nil { - t.Fatalf("Timed out wile waiting for a RouteLookupRequest") - } - gotLookupRequest := req.(*rlspb.RouteLookupRequest) - if diff := cmp.Diff(wantLookupRequest, gotLookupRequest, cmp.Comparer(proto.Equal)); diff != "" { - t.Fatalf("RouteLookupRequest diff (-want, +got):\n%s", diff) - } - - // We setup the fake server to return this response when it receives a - // request. 
- server.ResponseChan <- fakeserver.Response{ - Resp: &rlspb.RouteLookupResponse{ - Targets: wantRespTargets, - HeaderData: wantHeaderData, - }, - } - - if e, err := errCh.Receive(ctx); err != nil || e != nil { - t.Fatalf("lookup error: %v, error receiving from channel: %v", e, err) - } -} diff --git a/balancer/rls/internal/config.go b/balancer/rls/internal/config.go deleted file mode 100644 index a3deb8906c9a..000000000000 --- a/balancer/rls/internal/config.go +++ /dev/null @@ -1,326 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package rls - -import ( - "bytes" - "encoding/json" - "fmt" - "time" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/ptypes" - durationpb "github.com/golang/protobuf/ptypes/duration" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/rls/internal/keys" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" - "google.golang.org/grpc/internal/grpcutil" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -const ( - // This is max duration that we are willing to cache RLS responses. If the - // service config doesn't specify a value for max_age or if it specified a - // value greater that this, we will use this value instead. 
- maxMaxAge = 5 * time.Minute - // If lookup_service_timeout is not specified in the service config, we use - // a default of 10 seconds. - defaultLookupServiceTimeout = 10 * time.Second - // This is set to the targetNameField in the child policy config during - // service config validation. - dummyChildPolicyTarget = "target_name_to_be_filled_in_later" -) - -// lbConfig contains the parsed and validated contents of the -// loadBalancingConfig section of the service config. The RLS LB policy will -// use this to directly access config data instead of ploughing through proto -// fields. -type lbConfig struct { - serviceconfig.LoadBalancingConfig - - kbMap keys.BuilderMap - lookupService string - lookupServiceTimeout time.Duration - maxAge time.Duration - staleAge time.Duration - cacheSizeBytes int64 - defaultTarget string - cpName string - cpTargetField string - cpConfig map[string]json.RawMessage -} - -func (lbCfg *lbConfig) Equal(other *lbConfig) bool { - return lbCfg.kbMap.Equal(other.kbMap) && - lbCfg.lookupService == other.lookupService && - lbCfg.lookupServiceTimeout == other.lookupServiceTimeout && - lbCfg.maxAge == other.maxAge && - lbCfg.staleAge == other.staleAge && - lbCfg.cacheSizeBytes == other.cacheSizeBytes && - lbCfg.defaultTarget == other.defaultTarget && - lbCfg.cpName == other.cpName && - lbCfg.cpTargetField == other.cpTargetField && - cpConfigEqual(lbCfg.cpConfig, other.cpConfig) -} - -func cpConfigEqual(am, bm map[string]json.RawMessage) bool { - if (bm == nil) != (am == nil) { - return false - } - if len(bm) != len(am) { - return false - } - - for k, jsonA := range am { - jsonB, ok := bm[k] - if !ok { - return false - } - if !bytes.Equal(jsonA, jsonB) { - return false - } - } - return true -} - -// This struct resembles the JSON respresentation of the loadBalancing config -// and makes it easier to unmarshal. 
-type lbConfigJSON struct { - RouteLookupConfig json.RawMessage - ChildPolicy []*loadBalancingConfig - ChildPolicyConfigTargetFieldName string -} - -// loadBalancingConfig represents a single load balancing config, -// stored in JSON format. -// -// TODO(easwars): This code seems to be repeated in a few places -// (service_config.go and in the xds code as well). Refactor and re-use. -type loadBalancingConfig struct { - Name string - Config json.RawMessage -} - -// MarshalJSON returns a JSON encoding of l. -func (l *loadBalancingConfig) MarshalJSON() ([]byte, error) { - return nil, fmt.Errorf("rls: loadBalancingConfig.MarshalJSON() is unimplemented") -} - -// UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. -func (l *loadBalancingConfig) UnmarshalJSON(data []byte) error { - var cfg map[string]json.RawMessage - if err := json.Unmarshal(data, &cfg); err != nil { - return err - } - for name, config := range cfg { - l.Name = name - l.Config = config - } - return nil -} - -// ParseConfig parses and validates the JSON representation of the service -// config and returns the loadBalancingConfig to be used by the RLS LB policy. -// -// Helps implement the balancer.ConfigParser interface. 
-// -// The following validation checks are performed: -// * routeLookupConfig: -// ** grpc_keybuilders field: -// - must have at least one entry -// - must not have two entries with the same Name -// - must not have any entry with a Name with the service field unset or -// empty -// - must not have any entries without a Name -// - must not have a headers entry that has required_match set -// - must not have two headers entries with the same key within one entry -// ** lookup_service field: -// - must be set and non-empty and must parse as a target URI -// ** max_age field: -// - if not specified or is greater than maxMaxAge, it will be reset to -// maxMaxAge -// ** stale_age field: -// - if the value is greater than or equal to max_age, it is ignored -// - if set, then max_age must also be set -// ** valid_targets field: -// - will be ignored -// ** cache_size_bytes field: -// - must be greater than zero -// - TODO(easwars): Define a minimum value for this field, to be used when -// left unspecified -// * childPolicy field: -// - must find a valid child policy with a valid config (the child policy must -// be able to parse the provided config successfully when we pass it a dummy -// target name in the target_field provided by the -// childPolicyConfigTargetFieldName field) -// * childPolicyConfigTargetFieldName field: -// - must be set and non-empty -func (*rlsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - cfgJSON := &lbConfigJSON{} - if err := json.Unmarshal(c, cfgJSON); err != nil { - return nil, fmt.Errorf("rls: json unmarshal failed for service config {%+v}: %v", string(c), err) - } - - m := jsonpb.Unmarshaler{AllowUnknownFields: true} - rlsProto := &rlspb.RouteLookupConfig{} - if err := m.Unmarshal(bytes.NewReader(cfgJSON.RouteLookupConfig), rlsProto); err != nil { - return nil, fmt.Errorf("rls: bad RouteLookupConfig proto {%+v}: %v", string(cfgJSON.RouteLookupConfig), err) - } - - var childPolicy *loadBalancingConfig - for 
_, lbcfg := range cfgJSON.ChildPolicy { - if balancer.Get(lbcfg.Name) != nil { - childPolicy = lbcfg - break - } - } - - kbMap, err := keys.MakeBuilderMap(rlsProto) - if err != nil { - return nil, err - } - - lookupService := rlsProto.GetLookupService() - if lookupService == "" { - return nil, fmt.Errorf("rls: empty lookup_service in service config {%+v}", string(c)) - } - parsedTarget := grpcutil.ParseTarget(lookupService, false) - if parsedTarget.Scheme == "" { - parsedTarget.Scheme = resolver.GetDefaultScheme() - } - if resolver.Get(parsedTarget.Scheme) == nil { - return nil, fmt.Errorf("rls: invalid target URI in lookup_service {%s}", lookupService) - } - - lookupServiceTimeout, err := convertDuration(rlsProto.GetLookupServiceTimeout()) - if err != nil { - return nil, fmt.Errorf("rls: failed to parse lookup_service_timeout in service config {%+v}: %v", string(c), err) - } - if lookupServiceTimeout == 0 { - lookupServiceTimeout = defaultLookupServiceTimeout - } - maxAge, err := convertDuration(rlsProto.GetMaxAge()) - if err != nil { - return nil, fmt.Errorf("rls: failed to parse max_age in service config {%+v}: %v", string(c), err) - } - staleAge, err := convertDuration(rlsProto.GetStaleAge()) - if err != nil { - return nil, fmt.Errorf("rls: failed to parse staleAge in service config {%+v}: %v", string(c), err) - } - if staleAge != 0 && maxAge == 0 { - return nil, fmt.Errorf("rls: stale_age is set, but max_age is not in service config {%+v}", string(c)) - } - if staleAge >= maxAge { - logger.Info("rls: stale_age {%v} is greater than max_age {%v}, ignoring it", staleAge, maxAge) - staleAge = 0 - } - if maxAge == 0 || maxAge > maxMaxAge { - logger.Infof("rls: max_age in service config is %v, using %v", maxAge, maxMaxAge) - maxAge = maxMaxAge - } - cacheSizeBytes := rlsProto.GetCacheSizeBytes() - if cacheSizeBytes <= 0 { - return nil, fmt.Errorf("rls: cache_size_bytes must be greater than 0 in service config {%+v}", string(c)) - } - if childPolicy == nil { - return 
nil, fmt.Errorf("rls: childPolicy is invalid in service config {%+v}", string(c)) - } - if cfgJSON.ChildPolicyConfigTargetFieldName == "" { - return nil, fmt.Errorf("rls: childPolicyConfigTargetFieldName field is not set in service config {%+v}", string(c)) - } - // TODO(easwars): When we start instantiating the child policy from the - // parent RLS LB policy, we could make this function a method on the - // lbConfig object and share the code. We would be parsing the child policy - // config again during that time. The only difference betweeen now and then - // would be that we would be using real targetField name instead of the - // dummy. So, we could make the targetName field a parameter to this - // function during the refactor. - cpCfg, err := validateChildPolicyConfig(childPolicy, cfgJSON.ChildPolicyConfigTargetFieldName) - if err != nil { - return nil, err - } - - return &lbConfig{ - kbMap: kbMap, - lookupService: lookupService, - lookupServiceTimeout: lookupServiceTimeout, - maxAge: maxAge, - staleAge: staleAge, - cacheSizeBytes: cacheSizeBytes, - defaultTarget: rlsProto.GetDefaultTarget(), - // TODO(easwars): Once we refactor validateChildPolicyConfig and make - // it a method on the lbConfig object, we could directly store the - // balancer.Builder and/or balancer.ConfigParser here instead of the - // Name. That would mean that we would have to create the lbConfig - // object here first before validating the childPolicy config, but - // that's a minor detail. - cpName: childPolicy.Name, - cpTargetField: cfgJSON.ChildPolicyConfigTargetFieldName, - cpConfig: cpCfg, - }, nil -} - -// validateChildPolicyConfig validates the child policy config received in the -// service config. This makes it possible for us to reject service configs -// which contain invalid child policy configs which we know will fail for sure. -// -// It does the following: -// * Unmarshals the provided child policy config into a map of string to -// json.RawMessage. 
This allows us to add an entry to the map corresponding -// to the targetFieldName that we received in the service config. -// * Marshals the map back into JSON, finds the config parser associated with -// the child policy and asks it to validate the config. -// * If the validation succeeded, removes the dummy entry from the map and -// returns it. If any of the above steps failed, it returns an error. -func validateChildPolicyConfig(cp *loadBalancingConfig, cpTargetField string) (map[string]json.RawMessage, error) { - var childConfig map[string]json.RawMessage - if err := json.Unmarshal(cp.Config, &childConfig); err != nil { - return nil, fmt.Errorf("rls: json unmarshal failed for child policy config {%+v}: %v", cp.Config, err) - } - childConfig[cpTargetField], _ = json.Marshal(dummyChildPolicyTarget) - - jsonCfg, err := json.Marshal(childConfig) - if err != nil { - return nil, fmt.Errorf("rls: json marshal failed for child policy config {%+v}: %v", childConfig, err) - } - builder := balancer.Get(cp.Name) - if builder == nil { - // This should never happen since we already made sure that the child - // policy name mentioned in the service config is a valid one. 
- return nil, fmt.Errorf("rls: balancer builder not found for child_policy %v", cp.Name) - } - parser, ok := builder.(balancer.ConfigParser) - if !ok { - return nil, fmt.Errorf("rls: balancer builder for child_policy does not implement balancer.ConfigParser: %v", cp.Name) - } - _, err = parser.ParseConfig(jsonCfg) - if err != nil { - return nil, fmt.Errorf("rls: childPolicy config validation failed: %v", err) - } - delete(childConfig, cpTargetField) - return childConfig, nil -} - -func convertDuration(d *durationpb.Duration) (time.Duration, error) { - if d == nil { - return 0, nil - } - return ptypes.Duration(d) -} diff --git a/balancer/rls/internal/keys/builder.go b/balancer/rls/internal/keys/builder.go index 5ce5a9da508a..d010f74456fe 100644 --- a/balancer/rls/internal/keys/builder.go +++ b/balancer/rls/internal/keys/builder.go @@ -25,29 +25,15 @@ import ( "sort" "strings" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" ) -// BuilderMap provides a mapping from a request path to the key builder to be -// used for that path. -// The BuilderMap is constructed by parsing the RouteLookupConfig received by -// the RLS balancer as part of its ServiceConfig, and is used by the picker in -// the data path to build the RLS keys to be used for a given request. +// BuilderMap maps from request path to the key builder for that path. type BuilderMap map[string]builder // MakeBuilderMap parses the provided RouteLookupConfig proto and returns a map // from paths to key builders. 
-// -// The following conditions are validated, and an error is returned if any of -// them is not met: -// grpc_keybuilders field -// * must have at least one entry -// * must not have two entries with the same Name -// * must not have any entry with a Name with the service field unset or empty -// * must not have any entries without a Name -// * must not have a headers entry that has required_match set -// * must not have two headers entries with the same key within one entry func MakeBuilderMap(cfg *rlspb.RouteLookupConfig) (BuilderMap, error) { kbs := cfg.GetGrpcKeybuilders() if len(kbs) == 0 { @@ -56,21 +42,46 @@ func MakeBuilderMap(cfg *rlspb.RouteLookupConfig) (BuilderMap, error) { bm := make(map[string]builder) for _, kb := range kbs { + // Extract keys from `headers`, `constant_keys` and `extra_keys` fields + // and populate appropriate values in the builder struct. Also ensure + // that keys are not repeated. var matchers []matcher seenKeys := make(map[string]bool) + constantKeys := kb.GetConstantKeys() + for k := range kb.GetConstantKeys() { + seenKeys[k] = true + } for _, h := range kb.GetHeaders() { if h.GetRequiredMatch() { return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig has required_match field set {%+v}", kbs) } key := h.GetKey() if seenKeys[key] { - return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Key field in headers {%+v}", kbs) + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q across headers, constant_keys and extra_keys {%+v}", key, kbs) } seenKeys[key] = true matchers = append(matchers, matcher{key: h.GetKey(), names: h.GetNames()}) } - b := builder{matchers: matchers} + if seenKeys[kb.GetExtraKeys().GetHost()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetHost(), kbs) + } + if seenKeys[kb.GetExtraKeys().GetService()] { + return nil, 
fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetService(), kbs) + } + if seenKeys[kb.GetExtraKeys().GetMethod()] { + return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key %q in extra_keys from constant_keys or headers {%+v}", kb.GetExtraKeys().GetMethod(), kbs) + } + b := builder{ + headerKeys: matchers, + constantKeys: constantKeys, + hostKey: kb.GetExtraKeys().GetHost(), + serviceKey: kb.GetExtraKeys().GetService(), + methodKey: kb.GetExtraKeys().GetMethod(), + } + // Store the builder created above in the BuilderMap based on the value + // of the `Names` field, which wraps incoming request's service and + // method. Also, ensure that there are no repeated `Names` field. names := kb.GetNames() if len(names) == 0 { return nil, fmt.Errorf("rls: GrpcKeyBuilder in RouteLookupConfig does not contain any Name {%+v}", kbs) @@ -108,16 +119,35 @@ type KeyMap struct { // RLSKey builds the RLS keys to be used for the given request, identified by // the request path and the request headers stored in metadata. -func (bm BuilderMap) RLSKey(md metadata.MD, path string) KeyMap { +func (bm BuilderMap) RLSKey(md metadata.MD, host, path string) KeyMap { + // The path passed in is of the form "/service/method". The keyBuilderMap is + // indexed with keys of the form "/service/" or "/service/method". The service + // that we set in the keyMap (to be sent out in the RLS request) should not + // include any slashes though. 
+ i := strings.LastIndex(path, "/") + service, method := path[:i+1], path[i+1:] b, ok := bm[path] if !ok { - i := strings.LastIndex(path, "/") - b, ok = bm[path[:i+1]] + b, ok = bm[service] if !ok { return KeyMap{} } } - return b.keys(md) + + kvMap := b.buildHeaderKeys(md) + if b.hostKey != "" { + kvMap[b.hostKey] = host + } + if b.serviceKey != "" { + kvMap[b.serviceKey] = strings.Trim(service, "/") + } + if b.methodKey != "" { + kvMap[b.methodKey] = method + } + for k, v := range b.constantKeys { + kvMap[k] = v + } + return KeyMap{Map: kvMap, Str: mapToString(kvMap)} } // Equal reports whether bm and am represent equivalent BuilderMaps. @@ -141,26 +171,19 @@ func (bm BuilderMap) Equal(am BuilderMap) bool { return true } -// builder provides the actual functionality of building RLS keys. These are -// stored in the BuilderMap. -// While processing a pick, the picker looks in the BuilderMap for the -// appropriate builder to be used for the given RPC. For each of the matchers -// in the found builder, we iterate over the list of request headers (available -// as metadata in the context). Once a header matches one of the names in the -// matcher, we set the value of the header in the keyMap (with the key being -// the one found in the matcher) and move on to the next matcher. If no -// KeyBuilder was found in the map, or no header match was found, an empty -// keyMap is returned. +// builder provides the actual functionality of building RLS keys. type builder struct { - matchers []matcher + headerKeys []matcher + constantKeys map[string]string + // The following keys mirror corresponding fields in `extra_keys`. + hostKey string + serviceKey string + methodKey string } // Equal reports whether b and a represent equivalent key builders. 
 func (b builder) Equal(a builder) bool { - if (b.matchers == nil) != (a.matchers == nil) { - return false - } - if len(b.matchers) != len(a.matchers) { + if len(b.headerKeys) != len(a.headerKeys) { return false } // Protobuf serialization maintains the order of repeated fields. Matchers @@ -168,13 +191,23 @@ func (b builder) Equal(a builder) bool { // order changes, it means that the order in the protobuf changed. We report // this case as not being equal even though the builders could possible be // functionally equal. - for i, bMatcher := range b.matchers { - aMatcher := a.matchers[i] + for i, bMatcher := range b.headerKeys { + aMatcher := a.headerKeys[i] if !bMatcher.Equal(aMatcher) { return false } } - return true + + if len(b.constantKeys) != len(a.constantKeys) { + return false + } + for k, v := range b.constantKeys { + if a.constantKeys[k] != v { + return false + } + } + + return b.hostKey == a.hostKey && b.serviceKey == a.serviceKey && b.methodKey == a.methodKey } // matcher helps extract a key from request headers based on a given name. @@ -185,14 +218,11 @@ type matcher struct { names []string } -// Equal reports if m and are are equivalent matchers. +// Equal reports whether m and a are equivalent matchers. 
func (m matcher) Equal(a matcher) bool { if m.key != a.key { return false } - if (m.names == nil) != (a.names == nil) { - return false - } if len(m.names) != len(a.names) { return false } @@ -204,9 +234,12 @@ func (m matcher) Equal(a matcher) bool { return true } -func (b builder) keys(md metadata.MD) KeyMap { +func (b builder) buildHeaderKeys(md metadata.MD) map[string]string { kvMap := make(map[string]string) - for _, m := range b.matchers { + if len(md) == 0 { + return kvMap + } + for _, m := range b.headerKeys { for _, name := range m.names { if vals := md.Get(name); vals != nil { kvMap[m.key] = strings.Join(vals, ",") @@ -214,11 +247,11 @@ func (b builder) keys(md metadata.MD) KeyMap { } } } - return KeyMap{Map: kvMap, Str: mapToString(kvMap)} + return kvMap } func mapToString(kv map[string]string) string { - var keys []string + keys := make([]string, 0, len(kv)) for k := range kv { keys = append(keys, k) } diff --git a/balancer/rls/internal/keys/builder_test.go b/balancer/rls/internal/keys/builder_test.go index a5cad29e0c93..90c132bc9169 100644 --- a/balancer/rls/internal/keys/builder_test.go +++ b/balancer/rls/internal/keys/builder_test.go @@ -24,7 +24,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" ) @@ -37,6 +37,15 @@ var ( {Key: "k1", Names: []string{"n1"}}, {Key: "k2", Names: []string{"n1"}}, }, + ExtraKeys: &rlspb.GrpcKeyBuilder_ExtraKeys{ + Host: "host", + Service: "service", + Method: "method", + }, + ConstantKeys: map[string]string{ + "const-key-1": "const-val-1", + "const-key-2": "const-val-2", + }, } goodKeyBuilder2 = &rlspb.GrpcKeyBuilder{ Names: []*rlspb.GrpcKeyBuilder_Name{ @@ -50,13 +59,21 @@ var ( ) func TestMakeBuilderMap(t *testing.T) { - wantBuilderMap1 := map[string]builder{ - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}, {key: 
"k2", names: []string{"n1"}}}}, + gFooBuilder := builder{ + headerKeys: []matcher{{key: "k1", names: []string{"n1"}}, {key: "k2", names: []string{"n1"}}}, + constantKeys: map[string]string{ + "const-key-1": "const-val-1", + "const-key-2": "const-val-2", + }, + hostKey: "host", + serviceKey: "service", + methodKey: "method", } + wantBuilderMap1 := map[string]builder{"/gFoo/": gFooBuilder} wantBuilderMap2 := map[string]builder{ - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}, {key: "k2", names: []string{"n1"}}}}, - "/gBar/method1": {matchers: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, - "/gFoobar/": {matchers: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, + "/gFoo/": gFooBuilder, + "/gBar/method1": {headerKeys: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, + "/gFoobar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, } tests := []struct { @@ -91,33 +108,6 @@ func TestMakeBuilderMap(t *testing.T) { } func TestMakeBuilderMapErrors(t *testing.T) { - emptyServiceKeyBuilder := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{ - {Service: "bFoo", Method: "method1"}, - {Service: "bBar"}, - {Method: "method1"}, - }, - Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, - } - requiredMatchKeyBuilder := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "bFoo", Method: "method1"}}, - Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}, RequiredMatch: true}}, - } - repeatedHeadersKeyBuilder := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{ - {Service: "gBar", Method: "method1"}, - {Service: "gFoobar"}, - }, - Headers: []*rlspb.NameMatcher{ - {Key: "k1", Names: []string{"n1", "n2"}}, - {Key: "k1", Names: []string{"n1", "n2"}}, - }, - } - methodNameWithSlashKeyBuilder := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gBar", Method: "method1/foo"}}, - Headers: []*rlspb.NameMatcher{{Key: "k1", Names: 
[]string{"n1", "n2"}}}, - } - tests := []struct { desc string cfg *rlspb.RouteLookupConfig @@ -138,7 +128,17 @@ func TestMakeBuilderMapErrors(t *testing.T) { { desc: "GrpcKeyBuilder with empty Service field", cfg: &rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{emptyServiceKeyBuilder, goodKeyBuilder1}, + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + {Service: "bFoo", Method: "method1"}, + {Service: "bBar"}, + {Method: "method1"}, + }, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, + }, + goodKeyBuilder1, + }, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains a Name field with no Service", }, @@ -152,21 +152,96 @@ func TestMakeBuilderMapErrors(t *testing.T) { { desc: "GrpcKeyBuilder with requiredMatch field set", cfg: &rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{requiredMatchKeyBuilder, goodKeyBuilder1}, + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "bFoo", Method: "method1"}}, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}, RequiredMatch: true}}, + }, + goodKeyBuilder1, + }, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig has required_match field set", }, { desc: "GrpcKeyBuilder two headers with same key", cfg: &rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{repeatedHeadersKeyBuilder, goodKeyBuilder1}, + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + {Service: "gBar", Method: "method1"}, + {Service: "gFoobar"}, + }, + Headers: []*rlspb.NameMatcher{ + {Key: "k1", Names: []string{"n1", "n2"}}, + {Key: "k1", Names: []string{"n1", "n2"}}, + }, + }, + goodKeyBuilder1, + }, + }, + wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key \"k1\" across headers, constant_keys and extra_keys", + }, + { + desc: "GrpcKeyBuilder repeated keys across headers and constant_keys", + 
cfg: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + {Service: "gBar", Method: "method1"}, + {Service: "gFoobar"}, + }, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, + ConstantKeys: map[string]string{"k1": "v1"}, + }, + goodKeyBuilder1, + }, + }, + wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key \"k1\" across headers, constant_keys and extra_keys", + }, + { + desc: "GrpcKeyBuilder repeated keys across headers and extra_keys", + cfg: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + {Service: "gBar", Method: "method1"}, + {Service: "gFoobar"}, + }, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, + ExtraKeys: &rlspb.GrpcKeyBuilder_ExtraKeys{Method: "k1"}, + }, + goodKeyBuilder1, + }, + }, + wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key \"k1\" in extra_keys from constant_keys or headers", + }, + { + desc: "GrpcKeyBuilder repeated keys across constant_keys and extra_keys", + cfg: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + {Service: "gBar", Method: "method1"}, + {Service: "gFoobar"}, + }, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, + ConstantKeys: map[string]string{"host": "v1"}, + ExtraKeys: &rlspb.GrpcKeyBuilder_ExtraKeys{Host: "host"}, + }, + goodKeyBuilder1, + }, }, - wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated Key field in headers", + wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains repeated key \"host\" in extra_keys from constant_keys or headers", }, { desc: "GrpcKeyBuilder with slash in method name", cfg: &rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{methodNameWithSlashKeyBuilder}, + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: 
[]*rlspb.GrpcKeyBuilder_Name{{Service: "gBar", Method: "method1/foo"}}, + Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1", "n2"}}}, + }, + }, }, wantErrPrefix: "rls: GrpcKeyBuilder in RouteLookupConfig contains a method with a slash", }, @@ -257,11 +332,22 @@ func TestRLSKey(t *testing.T) { wantKM: KeyMap{Map: map[string]string{"k1": "v1"}, Str: "k1=v1"}, }, { - // Multiple matchers find hits in the provided request headers. - desc: "multipleMatchers", - path: "/gFoo/method1", - md: metadata.Pairs("n2", "v2", "n1", "v1"), - wantKM: KeyMap{Map: map[string]string{"k1": "v1", "k2": "v1"}, Str: "k1=v1,k2=v1"}, + // Multiple headerKeys find hits in the provided request headers. + desc: "multipleMatchers", + path: "/gFoo/method1", + md: metadata.Pairs("n2", "v2", "n1", "v1"), + wantKM: KeyMap{ + Map: map[string]string{ + "const-key-1": "const-val-1", + "const-key-2": "const-val-2", + "host": "dummy-host", + "service": "gFoo", + "method": "method1", + "k1": "v1", + "k2": "v1", + }, + Str: "const-key-1=const-val-1,const-key-2=const-val-2,host=dummy-host,k1=v1,k2=v1,method=method1,service=gFoo", + }, }, { // A match is found for a header which is specified multiple times. 
@@ -275,7 +361,7 @@ func TestRLSKey(t *testing.T) { for _, test := range tests { t.Run(test.desc, func(t *testing.T) { - if gotKM := bm.RLSKey(test.md, test.path); !cmp.Equal(gotKM, test.wantKM) { + if gotKM := bm.RLSKey(test.md, "dummy-host", test.path); !cmp.Equal(gotKM, test.wantKM) { t.Errorf("RLSKey(%+v, %s) = %+v, want %+v", test.md, test.path, gotKM, test.wantKM) } }) @@ -351,57 +437,57 @@ func TestBuilderMapEqual(t *testing.T) { { desc: "nil and non-nil builder maps", a: nil, - b: map[string]builder{"/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}}, + b: map[string]builder{"/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}}, wantEqual: false, }, { desc: "empty and non-empty builder maps", a: make(map[string]builder), - b: map[string]builder{"/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}}, + b: map[string]builder{"/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}}, wantEqual: false, }, { desc: "different number of map keys", a: map[string]builder{ - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, - "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, b: map[string]builder{ - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: false, }, { desc: "different map keys", a: map[string]builder{ - "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, b: map[string]builder{ - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: false, }, { desc: "equal keys different values", a: map[string]builder{ - "/gBar/": 
{matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1", "n2"}}}}, }, b: map[string]builder{ - "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: false, }, { desc: "good match", a: map[string]builder{ - "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, b: map[string]builder{ - "/gBar/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, - "/gFoo/": {matchers: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gBar/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, + "/gFoo/": {headerKeys: []matcher{{key: "k1", names: []string{"n1"}}}}, }, wantEqual: true, }, @@ -425,44 +511,80 @@ func TestBuilderEqual(t *testing.T) { }{ { desc: "nil builders", - a: builder{matchers: nil}, - b: builder{matchers: nil}, + a: builder{headerKeys: nil}, + b: builder{headerKeys: nil}, wantEqual: true, }, { desc: "empty builders", - a: builder{matchers: []matcher{}}, - b: builder{matchers: []matcher{}}, + a: builder{headerKeys: []matcher{}}, + b: builder{headerKeys: []matcher{}}, wantEqual: true, }, { - desc: "nil and non-nil builders", - a: builder{matchers: nil}, - b: builder{matchers: []matcher{}}, + desc: "empty and non-empty builders", + a: builder{headerKeys: []matcher{}}, + b: builder{headerKeys: []matcher{{key: "foo"}}}, wantEqual: false, }, { - desc: "empty and 
non-empty builders", - a: builder{matchers: []matcher{}}, - b: builder{matchers: []matcher{{key: "foo"}}}, + desc: "different number of headerKeys", + a: builder{headerKeys: []matcher{{key: "foo"}, {key: "bar"}}}, + b: builder{headerKeys: []matcher{{key: "foo"}}}, wantEqual: false, }, { - desc: "different number of matchers", - a: builder{matchers: []matcher{{key: "foo"}, {key: "bar"}}}, - b: builder{matchers: []matcher{{key: "foo"}}}, + desc: "equal number but differing headerKeys", + a: builder{headerKeys: []matcher{{key: "bar"}}}, + b: builder{headerKeys: []matcher{{key: "foo"}}}, wantEqual: false, }, { - desc: "equal number but differing matchers", - a: builder{matchers: []matcher{{key: "bar"}}}, - b: builder{matchers: []matcher{{key: "foo"}}}, + desc: "different number of constantKeys", + a: builder{constantKeys: map[string]string{"k1": "v1"}}, + b: builder{constantKeys: map[string]string{"k1": "v1", "k2": "v2"}}, wantEqual: false, }, { - desc: "good match", - a: builder{matchers: []matcher{{key: "foo"}}}, - b: builder{matchers: []matcher{{key: "foo"}}}, + desc: "equal number but differing constantKeys", + a: builder{constantKeys: map[string]string{"k1": "v1"}}, + b: builder{constantKeys: map[string]string{"k2": "v2"}}, + wantEqual: false, + }, + { + desc: "different hostKey", + a: builder{hostKey: "host1"}, + b: builder{hostKey: "host2"}, + wantEqual: false, + }, + { + desc: "different serviceKey", + a: builder{hostKey: "service1"}, + b: builder{hostKey: "service2"}, + wantEqual: false, + }, + { + desc: "different methodKey", + a: builder{hostKey: "method1"}, + b: builder{hostKey: "method2"}, + wantEqual: false, + }, + { + desc: "equal", + a: builder{ + headerKeys: []matcher{{key: "foo"}}, + constantKeys: map[string]string{"k1": "v1"}, + hostKey: "host", + serviceKey: "/service/", + methodKey: "method", + }, + b: builder{ + headerKeys: []matcher{{key: "foo"}}, + constantKeys: map[string]string{"k1": "v1"}, + hostKey: "host", + serviceKey: "/service/", + 
methodKey: "method", + }, wantEqual: true, }, } diff --git a/balancer/rls/internal/picker.go b/balancer/rls/internal/picker.go deleted file mode 100644 index 738449446558..000000000000 --- a/balancer/rls/internal/picker.go +++ /dev/null @@ -1,149 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package rls - -import ( - "errors" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/rls/internal/cache" - "google.golang.org/grpc/balancer/rls/internal/keys" - "google.golang.org/grpc/metadata" -) - -var errRLSThrottled = errors.New("RLS call throttled at client side") - -// RLS rlsPicker selects the subConn to be used for a particular RPC. It does -// not manage subConns directly and usually deletegates to pickers provided by -// child policies. -// -// The RLS LB policy creates a new rlsPicker object whenever its ServiceConfig -// is updated and provides a bunch of hooks for the rlsPicker to get the latest -// state that it can used to make its decision. -type rlsPicker struct { - // The keyBuilder map used to generate RLS keys for the RPC. This is built - // by the LB policy based on the received ServiceConfig. - kbm keys.BuilderMap - - // The following hooks are setup by the LB policy to enable the rlsPicker to - // access state stored in the policy. This approach has the following - // advantages: - // 1. 
The rlsPicker is loosely coupled with the LB policy in the sense that - // updates happening on the LB policy like the receipt of an RLS - // response, or an update to the default rlsPicker etc are not explicitly - // pushed to the rlsPicker, but are readily available to the rlsPicker - // when it invokes these hooks. And the LB policy takes care of - // synchronizing access to these shared state. - // 2. It makes unit testing the rlsPicker easy since any number of these - // hooks could be overridden. - - // readCache is used to read from the data cache and the pending request - // map in an atomic fashion. The first return parameter is the entry in the - // data cache, and the second indicates whether an entry for the same key - // is present in the pending cache. - readCache func(cache.Key) (*cache.Entry, bool) - // shouldThrottle decides if the current RPC should be throttled at the - // client side. It uses an adaptive throttling algorithm. - shouldThrottle func() bool - // startRLS kicks off an RLS request in the background for the provided RPC - // path and keyMap. An entry in the pending request map is created before - // sending out the request and an entry in the data cache is created or - // updated upon receipt of a response. See implementation in the LB policy - // for details. - startRLS func(string, keys.KeyMap) - // defaultPick enables the rlsPicker to delegate the pick decision to the - // rlsPicker returned by the child LB policy pointing to the default target - // specified in the service config. - defaultPick func(balancer.PickInfo) (balancer.PickResult, error) -} - -// Pick makes the routing decision for every outbound RPC. -func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - // For every incoming request, we first build the RLS keys using the - // keyBuilder we received from the LB policy. If no metadata is present in - // the context, we end up using an empty key. 
- km := keys.KeyMap{} - md, ok := metadata.FromOutgoingContext(info.Ctx) - if ok { - km = p.kbm.RLSKey(md, info.FullMethodName) - } - - // We use the LB policy hook to read the data cache and the pending request - // map (whether or not an entry exists) for the RPC path and the generated - // RLS keys. We will end up kicking off an RLS request only if there is no - // pending request for the current RPC path and keys, and either we didn't - // find an entry in the data cache or the entry was stale and it wasn't in - // backoff. - startRequest := false - now := time.Now() - entry, pending := p.readCache(cache.Key{Path: info.FullMethodName, KeyMap: km.Str}) - if entry == nil { - startRequest = true - } else { - entry.Mu.Lock() - defer entry.Mu.Unlock() - if entry.StaleTime.Before(now) && entry.BackoffTime.Before(now) { - // This is the proactive cache refresh. - startRequest = true - } - } - - if startRequest && !pending { - if p.shouldThrottle() { - // The entry doesn't exist or has expired and the new RLS request - // has been throttled. Treat it as an error and delegate to default - // pick, if one exists, or fail the pick. - if entry == nil || entry.ExpiryTime.Before(now) { - if p.defaultPick != nil { - return p.defaultPick(info) - } - return balancer.PickResult{}, errRLSThrottled - } - // The proactive refresh has been throttled. Nothing to worry, just - // keep using the existing entry. - } else { - p.startRLS(info.FullMethodName, km) - } - } - - if entry != nil { - if entry.ExpiryTime.After(now) { - // This is the jolly good case where we have found a valid entry in - // the data cache. We delegate to the LB policy associated with - // this cache entry. - return entry.ChildPicker.Pick(info) - } else if entry.BackoffTime.After(now) { - // The entry has expired, but is in backoff. We delegate to the - // default pick, if one exists, or return the error from the last - // failed RLS request for this entry. 
- if p.defaultPick != nil { - return p.defaultPick(info) - } - return balancer.PickResult{}, entry.CallStatus - } - } - - // We get here only in the following cases: - // * No data cache entry or expired entry, RLS request sent out - // * No valid data cache entry and Pending cache entry exists - // We need to queue to pick which will be handled once the RLS response is - // received. - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable -} diff --git a/balancer/rls/internal/picker_test.go b/balancer/rls/internal/picker_test.go deleted file mode 100644 index 762eb5fd80e9..000000000000 --- a/balancer/rls/internal/picker_test.go +++ /dev/null @@ -1,615 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package rls - -import ( - "context" - "errors" - "fmt" - "math" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/rls/internal/cache" - "google.golang.org/grpc/balancer/rls/internal/keys" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/metadata" -) - -const defaultTestMaxAge = 5 * time.Second - -// initKeyBuilderMap initializes a keyBuilderMap of the form: -// { -// "gFoo": "k1=n1", -// "gBar/method1": "k2=n21,n22" -// "gFoobar": "k3=n3", -// } -func initKeyBuilderMap() (keys.BuilderMap, error) { - kb1 := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gFoo"}}, - Headers: []*rlspb.NameMatcher{{Key: "k1", Names: []string{"n1"}}}, - } - kb2 := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gBar", Method: "method1"}}, - Headers: []*rlspb.NameMatcher{{Key: "k2", Names: []string{"n21", "n22"}}}, - } - kb3 := &rlspb.GrpcKeyBuilder{ - Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "gFoobar"}}, - Headers: []*rlspb.NameMatcher{{Key: "k3", Names: []string{"n3"}}}, - } - return keys.MakeBuilderMap(&rlspb.RouteLookupConfig{ - GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{kb1, kb2, kb3}, - }) -} - -// fakeSubConn embeds the balancer.SubConn interface and contains an id which -// helps verify that the expected subConn was returned by the rlsPicker. -type fakeSubConn struct { - balancer.SubConn - id int -} - -// fakePicker sends a PickResult with a fakeSubConn with the configured id. -type fakePicker struct { - id int -} - -func (p *fakePicker) Pick(_ balancer.PickInfo) (balancer.PickResult, error) { - return balancer.PickResult{SubConn: &fakeSubConn{id: p.id}}, nil -} - -// newFakePicker returns a fakePicker configured with a random ID. 
The subConns -// returned by this picker are of type fakefakeSubConn, and contain the same -// random ID, which tests can use to verify. -func newFakePicker() *fakePicker { - return &fakePicker{id: grpcrand.Intn(math.MaxInt32)} -} - -func verifySubConn(sc balancer.SubConn, wantID int) error { - fsc, ok := sc.(*fakeSubConn) - if !ok { - return fmt.Errorf("Pick() returned a SubConn of type %T, want %T", sc, &fakeSubConn{}) - } - if fsc.id != wantID { - return fmt.Errorf("Pick() returned SubConn %d, want %d", fsc.id, wantID) - } - return nil -} - -// TestPickKeyBuilder verifies the different possible scenarios for forming an -// RLS key for an incoming RPC. -func TestPickKeyBuilder(t *testing.T) { - kbm, err := initKeyBuilderMap() - if err != nil { - t.Fatalf("Failed to create keyBuilderMap: %v", err) - } - - tests := []struct { - desc string - rpcPath string - md metadata.MD - wantKey cache.Key - }{ - { - desc: "non existent service in keyBuilder map", - rpcPath: "/gNonExistentService/method", - md: metadata.New(map[string]string{"n1": "v1", "n3": "v3"}), - wantKey: cache.Key{Path: "/gNonExistentService/method", KeyMap: ""}, - }, - { - desc: "no metadata in incoming context", - rpcPath: "/gFoo/method", - md: metadata.MD{}, - wantKey: cache.Key{Path: "/gFoo/method", KeyMap: ""}, - }, - { - desc: "keyBuilderMatch", - rpcPath: "/gFoo/method", - md: metadata.New(map[string]string{"n1": "v1", "n3": "v3"}), - wantKey: cache.Key{Path: "/gFoo/method", KeyMap: "k1=v1"}, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - randID := grpcrand.Intn(math.MaxInt32) - p := rlsPicker{ - kbm: kbm, - readCache: func(key cache.Key) (*cache.Entry, bool) { - if !cmp.Equal(key, test.wantKey) { - t.Fatalf("rlsPicker using cacheKey %v, want %v", key, test.wantKey) - } - - now := time.Now() - return &cache.Entry{ - ExpiryTime: now.Add(defaultTestMaxAge), - StaleTime: now.Add(defaultTestMaxAge), - // Cache entry is configured with a child policy whose - // 
rlsPicker always returns an empty PickResult and nil - // error. - ChildPicker: &fakePicker{id: randID}, - }, false - }, - // The other hooks are not set here because they are not expected to be - // invoked for these cases and if they get invoked, they will panic. - } - - gotResult, err := p.Pick(balancer.PickInfo{ - FullMethodName: test.rpcPath, - Ctx: metadata.NewOutgoingContext(context.Background(), test.md), - }) - if err != nil { - t.Fatalf("Pick() failed with error: %v", err) - } - sc, ok := gotResult.SubConn.(*fakeSubConn) - if !ok { - t.Fatalf("Pick() returned a SubConn of type %T, want %T", gotResult.SubConn, &fakeSubConn{}) - } - if sc.id != randID { - t.Fatalf("Pick() returned SubConn %d, want %d", sc.id, randID) - } - }) - } -} - -// TestPick_DataCacheMiss_PendingCacheMiss verifies different Pick scenarios -// where the entry is neither found in the data cache nor in the pending cache. -func TestPick_DataCacheMiss_PendingCacheMiss(t *testing.T) { - const ( - rpcPath = "/gFoo/method" - wantKeyMapStr = "k1=v1" - ) - kbm, err := initKeyBuilderMap() - if err != nil { - t.Fatalf("Failed to create keyBuilderMap: %v", err) - } - md := metadata.New(map[string]string{"n1": "v1", "n3": "v3"}) - wantKey := cache.Key{Path: rpcPath, KeyMap: wantKeyMapStr} - - tests := []struct { - desc string - // Whether or not a default target is configured. - defaultPickExists bool - // Whether or not the RLS request should be throttled. - throttle bool - // Whether or not the test is expected to make a new RLS request. - wantRLSRequest bool - // Expected error returned by the rlsPicker under test. 
- wantErr error - }{ - { - desc: "rls request throttled with default pick", - defaultPickExists: true, - throttle: true, - }, - { - desc: "rls request throttled without default pick", - throttle: true, - wantErr: errRLSThrottled, - }, - { - desc: "rls request not throttled", - wantRLSRequest: true, - wantErr: balancer.ErrNoSubConnAvailable, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - rlsCh := testutils.NewChannel() - defaultPicker := newFakePicker() - - p := rlsPicker{ - kbm: kbm, - // Cache lookup fails, no pending entry. - readCache: func(key cache.Key) (*cache.Entry, bool) { - if !cmp.Equal(key, wantKey) { - t.Fatalf("cache lookup using cacheKey %v, want %v", key, wantKey) - } - return nil, false - }, - shouldThrottle: func() bool { return test.throttle }, - startRLS: func(path string, km keys.KeyMap) { - if !test.wantRLSRequest { - rlsCh.Send(errors.New("RLS request attempted when none was expected")) - return - } - if path != rpcPath { - rlsCh.Send(fmt.Errorf("RLS request initiated for rpcPath %s, want %s", path, rpcPath)) - return - } - if km.Str != wantKeyMapStr { - rlsCh.Send(fmt.Errorf("RLS request initiated with keys %v, want %v", km.Str, wantKeyMapStr)) - return - } - rlsCh.Send(nil) - }, - } - if test.defaultPickExists { - p.defaultPick = defaultPicker.Pick - } - - gotResult, err := p.Pick(balancer.PickInfo{ - FullMethodName: rpcPath, - Ctx: metadata.NewOutgoingContext(context.Background(), md), - }) - if err != test.wantErr { - t.Fatalf("Pick() returned error {%v}, want {%v}", err, test.wantErr) - } - // If the test specified that a new RLS request should be made, - // verify it. 
- if test.wantRLSRequest { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if rlsErr, err := rlsCh.Receive(ctx); err != nil || rlsErr != nil { - t.Fatalf("startRLS() = %v, error receiving from channel: %v", rlsErr, err) - } - } - if test.wantErr != nil { - return - } - - // We get here only for cases where we expect the pick to be - // delegated to the default picker. - if err := verifySubConn(gotResult.SubConn, defaultPicker.id); err != nil { - t.Fatal(err) - } - }) - } -} - -// TestPick_DataCacheMiss_PendingCacheMiss verifies different Pick scenarios -// where the entry is not found in the data cache, but there is a entry in the -// pending cache. For all of these scenarios, no new RLS request will be sent. -func TestPick_DataCacheMiss_PendingCacheHit(t *testing.T) { - const ( - rpcPath = "/gFoo/method" - wantKeyMapStr = "k1=v1" - ) - kbm, err := initKeyBuilderMap() - if err != nil { - t.Fatalf("Failed to create keyBuilderMap: %v", err) - } - md := metadata.New(map[string]string{"n1": "v1", "n3": "v3"}) - wantKey := cache.Key{Path: rpcPath, KeyMap: wantKeyMapStr} - - tests := []struct { - desc string - defaultPickExists bool - }{ - { - desc: "default pick exists", - defaultPickExists: true, - }, - { - desc: "default pick does not exists", - }, - } - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - rlsCh := testutils.NewChannel() - p := rlsPicker{ - kbm: kbm, - // Cache lookup fails, pending entry exists. - readCache: func(key cache.Key) (*cache.Entry, bool) { - if !cmp.Equal(key, wantKey) { - t.Fatalf("cache lookup using cacheKey %v, want %v", key, wantKey) - } - return nil, true - }, - // Never throttle. We do not expect an RLS request to be sent out anyways. 
- shouldThrottle: func() bool { return false }, - startRLS: func(_ string, _ keys.KeyMap) { - rlsCh.Send(nil) - }, - } - if test.defaultPickExists { - p.defaultPick = func(info balancer.PickInfo) (balancer.PickResult, error) { - // We do not expect the default picker to be invoked at all. - // So, if we get here, the test will fail, because it - // expects the pick to be queued. - return balancer.PickResult{}, nil - } - } - - if _, err := p.Pick(balancer.PickInfo{ - FullMethodName: rpcPath, - Ctx: metadata.NewOutgoingContext(context.Background(), md), - }); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("Pick() returned error {%v}, want {%v}", err, balancer.ErrNoSubConnAvailable) - } - - // Make sure that no RLS request was sent out. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := rlsCh.Receive(ctx); err != context.DeadlineExceeded { - t.Fatalf("RLS request sent out when pending entry exists") - } - }) - } -} - -// TestPick_DataCacheHit_PendingCacheMiss verifies different Pick scenarios -// where the entry is found in the data cache, and there is no entry in the -// pending cache. This includes cases where the entry in the data cache is -// stale, expired or in backoff. -func TestPick_DataCacheHit_PendingCacheMiss(t *testing.T) { - const ( - rpcPath = "/gFoo/method" - wantKeyMapStr = "k1=v1" - ) - kbm, err := initKeyBuilderMap() - if err != nil { - t.Fatalf("Failed to create keyBuilderMap: %v", err) - } - md := metadata.New(map[string]string{"n1": "v1", "n3": "v3"}) - wantKey := cache.Key{Path: rpcPath, KeyMap: wantKeyMapStr} - rlsLastErr := errors.New("last RLS request failed") - - tests := []struct { - desc string - // The cache entry, as returned by the overridden readCache hook. - cacheEntry *cache.Entry - // Whether or not a default target is configured. - defaultPickExists bool - // Whether or not the RLS request should be throttled. 
- throttle bool - // Whether or not the test is expected to make a new RLS request. - wantRLSRequest bool - // Whether or not the rlsPicker should delegate to the child picker. - wantChildPick bool - // Whether or not the rlsPicker should delegate to the default picker. - wantDefaultPick bool - // Expected error returned by the rlsPicker under test. - wantErr error - }{ - { - desc: "valid entry", - cacheEntry: &cache.Entry{ - ExpiryTime: time.Now().Add(defaultTestMaxAge), - StaleTime: time.Now().Add(defaultTestMaxAge), - }, - wantChildPick: true, - }, - { - desc: "entryStale_requestThrottled", - cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, - throttle: true, - wantChildPick: true, - }, - { - desc: "entryStale_requestNotThrottled", - cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, - wantRLSRequest: true, - wantChildPick: true, - }, - { - desc: "entryExpired_requestThrottled_defaultPickExists", - cacheEntry: &cache.Entry{}, - throttle: true, - defaultPickExists: true, - wantDefaultPick: true, - }, - { - desc: "entryExpired_requestThrottled_defaultPickNotExists", - cacheEntry: &cache.Entry{}, - throttle: true, - wantErr: errRLSThrottled, - }, - { - desc: "entryExpired_requestNotThrottled", - cacheEntry: &cache.Entry{}, - wantRLSRequest: true, - wantErr: balancer.ErrNoSubConnAvailable, - }, - { - desc: "entryExpired_backoffNotExpired_defaultPickExists", - cacheEntry: &cache.Entry{ - BackoffTime: time.Now().Add(defaultTestMaxAge), - CallStatus: rlsLastErr, - }, - defaultPickExists: true, - }, - { - desc: "entryExpired_backoffNotExpired_defaultPickNotExists", - cacheEntry: &cache.Entry{ - BackoffTime: time.Now().Add(defaultTestMaxAge), - CallStatus: rlsLastErr, - }, - wantErr: rlsLastErr, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - rlsCh := testutils.NewChannel() - childPicker := newFakePicker() - defaultPicker := newFakePicker() - - p := rlsPicker{ - kbm: kbm, - readCache: 
func(key cache.Key) (*cache.Entry, bool) { - if !cmp.Equal(key, wantKey) { - t.Fatalf("cache lookup using cacheKey %v, want %v", key, wantKey) - } - test.cacheEntry.ChildPicker = childPicker - return test.cacheEntry, false - }, - shouldThrottle: func() bool { return test.throttle }, - startRLS: func(path string, km keys.KeyMap) { - if !test.wantRLSRequest { - rlsCh.Send(errors.New("RLS request attempted when none was expected")) - return - } - if path != rpcPath { - rlsCh.Send(fmt.Errorf("RLS request initiated for rpcPath %s, want %s", path, rpcPath)) - return - } - if km.Str != wantKeyMapStr { - rlsCh.Send(fmt.Errorf("RLS request initiated with keys %v, want %v", km.Str, wantKeyMapStr)) - return - } - rlsCh.Send(nil) - }, - } - if test.defaultPickExists { - p.defaultPick = defaultPicker.Pick - } - - gotResult, err := p.Pick(balancer.PickInfo{ - FullMethodName: rpcPath, - Ctx: metadata.NewOutgoingContext(context.Background(), md), - }) - if err != test.wantErr { - t.Fatalf("Pick() returned error {%v}, want {%v}", err, test.wantErr) - } - // If the test specified that a new RLS request should be made, - // verify it. - if test.wantRLSRequest { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if rlsErr, err := rlsCh.Receive(ctx); err != nil || rlsErr != nil { - t.Fatalf("startRLS() = %v, error receiving from channel: %v", rlsErr, err) - } - } - if test.wantErr != nil { - return - } - - // We get here only for cases where we expect the pick to be - // delegated to the child picker or the default picker. - if test.wantChildPick { - if err := verifySubConn(gotResult.SubConn, childPicker.id); err != nil { - t.Fatal(err) - } - - } - if test.wantDefaultPick { - if err := verifySubConn(gotResult.SubConn, defaultPicker.id); err != nil { - t.Fatal(err) - } - } - }) - } -} - -// TestPick_DataCacheHit_PendingCacheHit verifies different Pick scenarios where -// the entry is found both in the data cache and in the pending cache. 
This -// mostly verifies cases where the entry is stale, but there is already a -// pending RLS request, so no new request should be sent out. -func TestPick_DataCacheHit_PendingCacheHit(t *testing.T) { - const ( - rpcPath = "/gFoo/method" - wantKeyMapStr = "k1=v1" - ) - kbm, err := initKeyBuilderMap() - if err != nil { - t.Fatalf("Failed to create keyBuilderMap: %v", err) - } - md := metadata.New(map[string]string{"n1": "v1", "n3": "v3"}) - wantKey := cache.Key{Path: rpcPath, KeyMap: wantKeyMapStr} - - tests := []struct { - desc string - // The cache entry, as returned by the overridden readCache hook. - cacheEntry *cache.Entry - // Whether or not a default target is configured. - defaultPickExists bool - // Expected error returned by the rlsPicker under test. - wantErr error - }{ - { - desc: "stale entry", - cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, - }, - { - desc: "stale entry with default picker", - cacheEntry: &cache.Entry{ExpiryTime: time.Now().Add(defaultTestMaxAge)}, - defaultPickExists: true, - }, - { - desc: "entryExpired_defaultPickExists", - cacheEntry: &cache.Entry{}, - defaultPickExists: true, - wantErr: balancer.ErrNoSubConnAvailable, - }, - { - desc: "entryExpired_defaultPickNotExists", - cacheEntry: &cache.Entry{}, - wantErr: balancer.ErrNoSubConnAvailable, - }, - } - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - rlsCh := testutils.NewChannel() - childPicker := newFakePicker() - - p := rlsPicker{ - kbm: kbm, - readCache: func(key cache.Key) (*cache.Entry, bool) { - if !cmp.Equal(key, wantKey) { - t.Fatalf("cache lookup using cacheKey %v, want %v", key, wantKey) - } - test.cacheEntry.ChildPicker = childPicker - return test.cacheEntry, true - }, - // Never throttle. We do not expect an RLS request to be sent out anyways. 
- shouldThrottle: func() bool { return false }, - startRLS: func(path string, km keys.KeyMap) { - rlsCh.Send(nil) - }, - } - if test.defaultPickExists { - p.defaultPick = func(info balancer.PickInfo) (balancer.PickResult, error) { - // We do not expect the default picker to be invoked at all. - // So, if we get here, we return an error. - return balancer.PickResult{}, errors.New("default picker invoked when expecting a child pick") - } - } - - gotResult, err := p.Pick(balancer.PickInfo{ - FullMethodName: rpcPath, - Ctx: metadata.NewOutgoingContext(context.Background(), md), - }) - if err != test.wantErr { - t.Fatalf("Pick() returned error {%v}, want {%v}", err, test.wantErr) - } - // Make sure that no RLS request was sent out. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := rlsCh.Receive(ctx); err != context.DeadlineExceeded { - t.Fatalf("RLS request sent out when pending entry exists") - } - if test.wantErr != nil { - return - } - - // We get here only for cases where we expect the pick to be - // delegated to the child picker. - if err := verifySubConn(gotResult.SubConn, childPicker.id); err != nil { - t.Fatal(err) - } - }) - } -} diff --git a/balancer/rls/internal/test/e2e/e2e.go b/balancer/rls/internal/test/e2e/e2e.go new file mode 100644 index 000000000000..7b8a8bbde138 --- /dev/null +++ b/balancer/rls/internal/test/e2e/e2e.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package e2e contains utilities for end-to-end RouteLookupService tests. +package e2e diff --git a/balancer/rls/internal/test/e2e/rls_child_policy.go b/balancer/rls/internal/test/e2e/rls_child_policy.go new file mode 100644 index 000000000000..5a6e3e69175a --- /dev/null +++ b/balancer/rls/internal/test/e2e/rls_child_policy.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +const ( + // RLSChildPolicyTargetNameField is a top-level field name to add to the child + // policy's config, whose value is set to the target for the child policy. + RLSChildPolicyTargetNameField = "Backend" + // RLSChildPolicyBadTarget is a value which is considered a bad target by the + // child policy. This is useful to test bad child policy configuration. + RLSChildPolicyBadTarget = "bad-target" +) + +// ErrParseConfigBadTarget is the error returned from ParseConfig when the +// backend field is set to RLSChildPolicyBadTarget. 
+var ErrParseConfigBadTarget = errors.New("backend field set to RLSChildPolicyBadTarget") + +// BalancerFuncs is a set of callbacks which get invoked when the corresponding +// method on the child policy is invoked. +type BalancerFuncs struct { + UpdateClientConnState func(cfg *RLSChildPolicyConfig) error + Close func() +} + +// RegisterRLSChildPolicy registers a balancer builder with the given name, to +// be used as a child policy for the RLS LB policy. +// +// The child policy uses a pickfirst balancer under the hood to send all traffic +// to the single backend specified by the `RLSChildPolicyTargetNameField` field +// in its configuration which looks like: {"Backend": "Backend-address"}. +func RegisterRLSChildPolicy(name string, bf *BalancerFuncs) { + balancer.Register(bb{name: name, bf: bf}) +} + +type bb struct { + name string + bf *BalancerFuncs +} + +func (bb bb) Name() string { return bb.name } + +func (bb bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + pf := balancer.Get(grpc.PickFirstBalancerName) + b := &bal{ + Balancer: pf.Build(cc, opts), + bf: bb.bf, + done: grpcsync.NewEvent(), + } + go b.run() + return b +} + +func (bb bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &RLSChildPolicyConfig{} + if err := json.Unmarshal(c, cfg); err != nil { + return nil, err + } + if cfg.Backend == RLSChildPolicyBadTarget { + return nil, ErrParseConfigBadTarget + } + return cfg, nil +} + +type bal struct { + balancer.Balancer + bf *BalancerFuncs + done *grpcsync.Event +} + +// RLSChildPolicyConfig is the LB config for the test child policy. +type RLSChildPolicyConfig struct { + serviceconfig.LoadBalancingConfig + Backend string // The target for which this child policy was created. + Random string // A random field to test child policy config changes. 
+}
+
+func (b *bal) UpdateClientConnState(c balancer.ClientConnState) error {
+	cfg, ok := c.BalancerConfig.(*RLSChildPolicyConfig)
+	if !ok {
+		return fmt.Errorf("received balancer config of type %T, want %T", c.BalancerConfig, &RLSChildPolicyConfig{})
+	}
+	if b.bf != nil && b.bf.UpdateClientConnState != nil {
+		b.bf.UpdateClientConnState(cfg)
+	}
+	return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
+		ResolverState: resolver.State{Addresses: []resolver.Address{{Addr: cfg.Backend}}},
+	})
+}
+
+func (b *bal) Close() {
+	b.Balancer.Close()
+	if b.bf != nil && b.bf.Close != nil {
+		b.bf.Close()
+	}
+	b.done.Fire()
+}
+
+// run is a dummy goroutine to make sure that child policies are closed at the
+// end of tests. If they are not closed, these goroutines will be picked up by
+// the leakchecker and tests will fail.
+func (b *bal) run() {
+	<-b.done.Done()
+}
diff --git a/balancer/rls/internal/test/e2e/rls_lb_config.go b/balancer/rls/internal/test/e2e/rls_lb_config.go
new file mode 100644
index 000000000000..0a5993d795c0
--- /dev/null
+++ b/balancer/rls/internal/test/e2e/rls_lb_config.go
@@ -0,0 +1,103 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package e2e
+
+import (
+	"errors"
+	"fmt"
+
+	"google.golang.org/grpc/balancer"
+	rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1"
+	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
+	"google.golang.org/grpc/serviceconfig"
+
+	"google.golang.org/protobuf/encoding/protojson"
+)
+
+// RLSConfig is a utility type to build service config for the RLS LB policy.
+type RLSConfig struct {
+	RouteLookupConfig                *rlspb.RouteLookupConfig
+	RouteLookupChannelServiceConfig  string
+	ChildPolicy                      *internalserviceconfig.BalancerConfig
+	ChildPolicyConfigTargetFieldName string
+}
+
+// ServiceConfigJSON generates service config with a load balancing config
+// corresponding to the RLS LB policy.
+func (c *RLSConfig) ServiceConfigJSON() (string, error) {
+	m := protojson.MarshalOptions{
+		Multiline:     true,
+		Indent:        "  ",
+		UseProtoNames: true,
+	}
+	routeLookupCfg, err := m.Marshal(c.RouteLookupConfig)
+	if err != nil {
+		return "", err
+	}
+	childPolicy, err := c.ChildPolicy.MarshalJSON()
+	if err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf(`
+{
+  "loadBalancingConfig": [
+    {
+      "rls_experimental": {
+        "routeLookupConfig": %s,
+        "routeLookupChannelServiceConfig": %s,
+        "childPolicy": %s,
+        "childPolicyConfigTargetFieldName": %q
+      }
+    }
+  ]
+}`, string(routeLookupCfg), c.RouteLookupChannelServiceConfig, string(childPolicy), c.ChildPolicyConfigTargetFieldName), nil
+}
+
+// LoadBalancingConfig generates load balancing config which can be used as part of
+// a ClientConnState update to the RLS LB policy.
+func (c *RLSConfig) LoadBalancingConfig() (serviceconfig.LoadBalancingConfig, error) { + m := protojson.MarshalOptions{ + Multiline: true, + Indent: " ", + UseProtoNames: true, + } + routeLookupCfg, err := m.Marshal(c.RouteLookupConfig) + if err != nil { + return nil, err + } + childPolicy, err := c.ChildPolicy.MarshalJSON() + if err != nil { + return nil, err + } + lbConfigJSON := fmt.Sprintf(` +{ + "routeLookupConfig": %s, + "routeLookupChannelServiceConfig": %s, + "childPolicy": %s, + "childPolicyConfigTargetFieldName": %q +}`, string(routeLookupCfg), c.RouteLookupChannelServiceConfig, string(childPolicy), c.ChildPolicyConfigTargetFieldName) + + builder := balancer.Get("rls_experimental") + if builder == nil { + return nil, errors.New("balancer builder not found for RLS LB policy") + } + parser := builder.(balancer.ConfigParser) + return parser.ParseConfig([]byte(lbConfigJSON)) +} diff --git a/balancer/rls/internal/testutils/fakeserver/fakeserver.go b/balancer/rls/internal/testutils/fakeserver/fakeserver.go deleted file mode 100644 index 479e3036468f..000000000000 --- a/balancer/rls/internal/testutils/fakeserver/fakeserver.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package fakeserver provides a fake implementation of the RouteLookupService, -// to be used in unit tests. 
-package fakeserver - -import ( - "context" - "errors" - "fmt" - "net" - "time" - - "google.golang.org/grpc" - rlsgrpc "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" - rlspb "google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1" - "google.golang.org/grpc/internal/testutils" -) - -const ( - defaultDialTimeout = 5 * time.Second - defaultRPCTimeout = 5 * time.Second - defaultChannelBufferSize = 50 -) - -// Response wraps the response protobuf (xds/LRS) and error that the Server -// should send out to the client through a call to stream.Send() -type Response struct { - Resp *rlspb.RouteLookupResponse - Err error -} - -// Server is a fake implementation of RLS. It exposes channels to send/receive -// RLS requests and responses. -type Server struct { - rlsgrpc.UnimplementedRouteLookupServiceServer - RequestChan *testutils.Channel - ResponseChan chan Response - Address string -} - -// Start makes a new Server which uses the provided net.Listener. If lis is nil, -// it creates a new net.Listener on a local port. The returned cancel function -// should be invoked by the caller upon completion of the test. -func Start(lis net.Listener, opts ...grpc.ServerOption) (*Server, func(), error) { - if lis == nil { - var err error - lis, err = net.Listen("tcp", "localhost:0") - if err != nil { - return nil, func() {}, fmt.Errorf("net.Listen() failed: %v", err) - } - } - s := &Server{ - // Give the channels a buffer size of 1 so that we can setup - // expectations for one lookup call, without blocking. - RequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - ResponseChan: make(chan Response, 1), - Address: lis.Addr().String(), - } - - server := grpc.NewServer(opts...) - rlsgrpc.RegisterRouteLookupServiceServer(server, s) - go server.Serve(lis) - - return s, func() { server.Stop() }, nil -} - -// RouteLookup implements the RouteLookupService. 
-func (s *Server) RouteLookup(ctx context.Context, req *rlspb.RouteLookupRequest) (*rlspb.RouteLookupResponse, error) { - s.RequestChan.Send(req) - - // The leakchecker fails if we don't exit out of here in a reasonable time. - timer := time.NewTimer(defaultRPCTimeout) - select { - case <-timer.C: - return nil, errors.New("default RPC timeout exceeded") - case resp := <-s.ResponseChan: - timer.Stop() - return resp.Resp, resp.Err - } -} - -// ClientConn returns a grpc.ClientConn connected to the fakeServer. -func (s *Server) ClientConn() (*grpc.ClientConn, func(), error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultDialTimeout) - defer cancel() - - cc, err := grpc.DialContext(ctx, s.Address, grpc.WithInsecure(), grpc.WithBlock()) - if err != nil { - return nil, nil, fmt.Errorf("grpc.DialContext(%s) failed: %v", s.Address, err) - } - return cc, func() { cc.Close() }, nil -} diff --git a/balancer/rls/picker.go b/balancer/rls/picker.go new file mode 100644 index 000000000000..c2d972739689 --- /dev/null +++ b/balancer/rls/picker.go @@ -0,0 +1,331 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package rls + +import ( + "errors" + "fmt" + "strings" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/rls/internal/keys" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var ( + errRLSThrottled = errors.New("RLS call throttled at client side") + + // Function to compute data cache entry size. + computeDataCacheEntrySize = dcEntrySize +) + +// exitIdler wraps the only method on the BalancerGroup that the picker calls. +type exitIdler interface { + ExitIdleOne(id string) +} + +// rlsPicker selects the subConn to be used for a particular RPC. It does not +// manage subConns directly and delegates to pickers provided by child policies. +type rlsPicker struct { + // The keyBuilder map used to generate RLS keys for the RPC. This is built + // by the LB policy based on the received ServiceConfig. + kbm keys.BuilderMap + // Endpoint from the user's original dial target. Used to set the `host_key` + // field in `extra_keys`. + origEndpoint string + + lb *rlsBalancer + + // The picker is given its own copy of the below fields from the RLS LB policy + // to avoid having to grab the mutex on the latter. + defaultPolicy *childPolicyWrapper // Child policy for the default target. + ctrlCh *controlChannel // Control channel to the RLS server. + maxAge time.Duration // Cache max age from LB config. + staleAge time.Duration // Cache stale age from LB config. + bg exitIdler + logger *internalgrpclog.PrefixLogger +} + +// isFullMethodNameValid return true if name is of the form `/service/method`. +func isFullMethodNameValid(name string) bool { + return strings.HasPrefix(name, "/") && strings.Count(name, "/") == 2 +} + +// Pick makes the routing decision for every outbound RPC. 
+func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	if name := info.FullMethodName; !isFullMethodNameValid(name) {
+		return balancer.PickResult{}, fmt.Errorf("rls: method name %q is not of the form '/service/method", name)
+	}
+
+	// Build the request's keys using the key builders from LB config.
+	md, _ := metadata.FromOutgoingContext(info.Ctx)
+	reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName)
+
+	p.lb.cacheMu.Lock()
+	defer p.lb.cacheMu.Unlock()
+
+	// Lookup data cache and pending request map using request path and keys.
+	cacheKey := cacheKey{path: info.FullMethodName, keys: reqKeys.Str}
+	dcEntry := p.lb.dataCache.getEntry(cacheKey)
+	pendingEntry := p.lb.pendingMap[cacheKey]
+	now := time.Now()
+
+	switch {
+	// No data cache entry. No pending request.
+	case dcEntry == nil && pendingEntry == nil:
+		throttled := p.sendRouteLookupRequestLocked(cacheKey, &backoffState{bs: defaultBackoffStrategy}, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "")
+		if throttled {
+			return p.useDefaultPickIfPossible(info, errRLSThrottled)
+		}
+		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+
+	// No data cache entry. Pending request exists.
+	case dcEntry == nil && pendingEntry != nil:
+		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+
+	// Data cache hit. No pending request.
+	case dcEntry != nil && pendingEntry == nil:
+		if dcEntry.expiryTime.After(now) {
+			if !dcEntry.staleTime.IsZero() && dcEntry.staleTime.Before(now) && dcEntry.backoffTime.Before(now) {
+				p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData)
+			}
+			// Delegate to child policies.
+			res, err := p.delegateToChildPoliciesLocked(dcEntry, info)
+			return res, err
+		}
+
+		// We get here only if the data cache entry has expired. If entry is in
+		// backoff, delegate to default target or fail the pick.
+ if dcEntry.backoffState != nil && dcEntry.backoffTime.After(now) { + // Avoid propagating the status code received on control plane RPCs to the + // data plane which can lead to unexpected outcomes as we do not control + // the status code sent by the control plane. Propagating the status + // message received from the control plane is still fine, as it could be + // useful for debugging purposes. + st := dcEntry.status + return p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) + } + + // We get here only if the entry has expired and is not in backoff. + throttled := p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") + if throttled { + return p.useDefaultPickIfPossible(info, errRLSThrottled) + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + + // Data cache hit. Pending request exists. + default: + if dcEntry.expiryTime.After(now) { + res, err := p.delegateToChildPoliciesLocked(dcEntry, info) + return res, err + } + // Data cache entry has expired and pending request exists. Queue pick. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } +} + +// delegateToChildPoliciesLocked is a helper function which iterates through the +// list of child policy wrappers in a cache entry and attempts to find a child +// policy to which this RPC can be routed to. If all child policies are in +// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. 
+func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) {
+	const rlsDataHeaderName = "x-google-rls-data"
+	for i, cpw := range dcEntry.childPolicyWrappers {
+		state := (*balancer.State)(atomic.LoadPointer(&cpw.state))
+		// Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if
+		// it is the last one (which handles the case of delegating to the last
+		// child picker if all child policies are in TRANSIENT_FAILURE).
+		if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 {
+			// Any header data received from the RLS server is stored in the
+			// cache entry and needs to be sent to the actual backend in the
+			// X-Google-RLS-Data header.
+			res, err := state.Picker.Pick(info)
+			if err != nil {
+				return res, err
+			}
+			if res.Metadata == nil {
+				res.Metadata = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData)
+			} else {
+				res.Metadata.Append(rlsDataHeaderName, dcEntry.headerData)
+			}
+			return res, nil
+		}
+	}
+	// In the unlikely event that we have a cache entry with no targets, we end up
+	// queueing the RPC.
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
+// useDefaultPickIfPossible is a helper method which delegates to the default
+// target if one is configured, or fails the pick with the given error.
+func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, error) {
+	if p.defaultPolicy != nil {
+		state := (*balancer.State)(atomic.LoadPointer(&p.defaultPolicy.state))
+		return state.Picker.Pick(info)
+	}
+	return balancer.PickResult{}, errOnNoDefault
+}
+
+// sendRouteLookupRequestLocked adds an entry to the pending request map and
+// sends out an RLS request using the passed in arguments. Returns a value
+// indicating if the request was throttled by the client-side adaptive
+// throttler.
+func (p *rlsPicker) sendRouteLookupRequestLocked(cacheKey cacheKey, bs *backoffState, reqKeys map[string]string, reason rlspb.RouteLookupRequest_Reason, staleHeaders string) bool { + if p.lb.pendingMap[cacheKey] != nil { + return false + } + + p.lb.pendingMap[cacheKey] = bs + throttled := p.ctrlCh.lookup(reqKeys, reason, staleHeaders, func(targets []string, headerData string, err error) { + p.handleRouteLookupResponse(cacheKey, targets, headerData, err) + }) + if throttled { + delete(p.lb.pendingMap, cacheKey) + } + return throttled +} + +// handleRouteLookupResponse is the callback invoked by the control channel upon +// receipt of an RLS response. Modifies the data cache and pending requests map +// and sends a new picker. +// +// Acquires the write-lock on the cache. Caller must not hold p.lb.cacheMu. +func (p *rlsPicker) handleRouteLookupResponse(cacheKey cacheKey, targets []string, headerData string, err error) { + p.logger.Infof("Received RLS response for key %+v with targets %+v, headerData %q, err: %v", cacheKey, targets, headerData, err) + + p.lb.cacheMu.Lock() + defer func() { + // Pending request map entry is unconditionally deleted since the request is + // no longer pending. + p.logger.Infof("Removing pending request entry for key %+v", cacheKey) + delete(p.lb.pendingMap, cacheKey) + p.lb.sendNewPicker() + p.lb.cacheMu.Unlock() + }() + + // Lookup the data cache entry or create a new one. + dcEntry := p.lb.dataCache.getEntry(cacheKey) + if dcEntry == nil { + dcEntry = &cacheEntry{} + if _, ok := p.lb.dataCache.addEntry(cacheKey, dcEntry); !ok { + // This is a very unlikely case where we are unable to add a + // data cache entry. Log and leave. 
+			p.logger.Warningf("Failed to add data cache entry for %+v", cacheKey)
+			return
+		}
+	}
+
+	// For failed requests, the data cache entry is modified as follows:
+	// - status is set to error returned from the control channel
+	// - current backoff state is available in the pending entry
+	//   - `retries` field is incremented and
+	//   - backoff state is moved to the data cache
+	// - backoffTime is set to the time indicated by the backoff state
+	// - backoffExpiryTime is set to twice the backoff time
+	// - backoffTimer is set to fire after backoffTime
+	//
+	// When a proactive cache refresh fails, this would leave the targets and the
+	// expiry time from the old entry unchanged. And this means that the old valid
+	// entry would be used until expiration, and a new picker would be sent upon
+	// backoff expiry.
+	now := time.Now()
+	if err != nil {
+		dcEntry.status = err
+		pendingEntry := p.lb.pendingMap[cacheKey]
+		pendingEntry.retries++
+		backoffTime := pendingEntry.bs.Backoff(pendingEntry.retries)
+		dcEntry.backoffState = pendingEntry
+		dcEntry.backoffTime = now.Add(backoffTime)
+		dcEntry.backoffExpiryTime = now.Add(2 * backoffTime)
+		if dcEntry.backoffState.timer != nil {
+			dcEntry.backoffState.timer.Stop()
+		}
+		dcEntry.backoffState.timer = time.AfterFunc(backoffTime, p.lb.sendNewPicker)
+		return
+	}
+
+	// For successful requests, the cache entry is modified as follows:
+	// - childPolicyWrappers is set to point to the child policy wrappers
+	//   associated with the targets specified in the received response
+	// - headerData is set to the value received in the response
+	// - expiryTime, staleTime and earliestEvictTime are set
+	// - status is set to nil (OK status)
+	// - backoff state is cleared
+	p.setChildPolicyWrappersInCacheEntry(dcEntry, targets)
+	dcEntry.headerData = headerData
+	dcEntry.expiryTime = now.Add(p.maxAge)
+	if p.staleAge != 0 {
+		dcEntry.staleTime = now.Add(p.staleAge)
+	}
+	dcEntry.earliestEvictTime = now.Add(minEvictDuration)
+	dcEntry.status = nil
+	dcEntry.backoffState = &backoffState{bs: defaultBackoffStrategy}
+	dcEntry.backoffTime = time.Time{}
+	dcEntry.backoffExpiryTime = time.Time{}
+	p.lb.dataCache.updateEntrySize(dcEntry, computeDataCacheEntrySize(cacheKey, dcEntry))
+}
+
+// setChildPolicyWrappersInCacheEntry sets up the childPolicyWrappers field in
+// the cache entry to point to the child policy wrappers for the targets
+// specified in the RLS response.
+//
+// Caller must hold a write-lock on p.lb.cacheMu.
+func (p *rlsPicker) setChildPolicyWrappersInCacheEntry(dcEntry *cacheEntry, newTargets []string) {
+	// If the childPolicyWrappers field is already pointing to the right targets,
+	// then the field's value does not need to change.
+	targetsChanged := true
+	func() {
+		if cpws := dcEntry.childPolicyWrappers; cpws != nil {
+			if len(newTargets) != len(cpws) {
+				return
+			}
+			for i, target := range newTargets {
+				if cpws[i].target != target {
+					return
+				}
+			}
+			targetsChanged = false
+		}
+	}()
+	if !targetsChanged {
+		return
+	}
+
+	// If the childPolicyWrappers field is not already set to the right targets,
+	// then it must be reset. We construct a new list of child policies and
+	// then swap out the old list for the new one.
+	newChildPolicies := p.lb.acquireChildPolicyReferences(newTargets)
+	oldChildPolicyTargets := make([]string, len(dcEntry.childPolicyWrappers))
+	for i, cpw := range dcEntry.childPolicyWrappers {
+		oldChildPolicyTargets[i] = cpw.target
+	}
+	p.lb.releaseChildPolicyReferences(oldChildPolicyTargets)
+	dcEntry.childPolicyWrappers = newChildPolicies
+}
+
+func dcEntrySize(key cacheKey, entry *cacheEntry) int64 {
+	return int64(len(key.path) + len(key.keys) + len(entry.headerData))
+}
diff --git a/balancer/rls/picker_test.go b/balancer/rls/picker_test.go
new file mode 100644
index 000000000000..a2f84b265b1f
--- /dev/null
+++ b/balancer/rls/picker_test.go
@@ -0,0 +1,848 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "context" + "fmt" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/stubserver" + rlstest "google.golang.org/grpc/internal/testutils/rls" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" + + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// Test verifies the scenario where there is no matching entry in the data cache +// and no pending request either, and the ensuing RLS request is throttled. +func (s) TestPick_DataCacheMiss_NoPendingEntry_ThrottledWithDefaultTarget(t *testing.T) { + // Start an RLS server and set the throttler to always throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) + + // Build RLS service config with a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + defBackendCh, defBackendAddress := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + + // Register a manual resolver and push the RLS service config through it. 
+ r := startManualResolverWithConfig(t, rlsConfig) + + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the default target. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, defBackendCh) + + // Make sure no RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, false) +} + +// Test verifies the scenario where there is no matching entry in the data cache +// and no pending request either, and the ensuing RLS request is throttled. +// There is no default target configured in the service config, so the RPC is +// expected to fail with an RLS throttled error. +func (s) TestPick_DataCacheMiss_NoPendingEntry_ThrottledWithoutDefaultTarget(t *testing.T) { + // Start an RLS server and set the throttler to always throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, alwaysThrottlingThrottler()) + + // Build an RLS config without a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and expect it to fail with RLS throttled error. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndVerifyError(ctx, t, cc, codes.Unavailable, errRLSThrottled) + + // Make sure no RLS request is sent out. 
+ verifyRLSRequest(t, rlsReqCh, false) +} + +// Test verifies the scenario where there is no matching entry in the data cache +// and no pending request either, and the ensuing RLS request is not throttled. +// The RLS response does not contain any backends, so the RPC fails with a +// deadline exceeded error. +func (s) TestPick_DataCacheMiss_NoPendingEntry_NotThrottled(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build an RLS config without a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and expect it to fail with deadline exceeded error. We use a + // smaller timeout to ensure that the test doesn't run very long. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + makeTestRPCAndVerifyError(ctx, t, cc, codes.DeadlineExceeded, context.DeadlineExceeded) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) +} + +// Test verifies the scenario where there is no matching entry in the data +// cache, but there is a pending request. So, we expect no RLS request to be +// sent out. The pick should be queued and not delegated to the default target. 
+func (s) TestPick_DataCacheMiss_PendingEntryExists(t *testing.T) { + tests := []struct { + name string + withDefaultTarget bool + }{ + { + name: "withDefaultTarget", + withDefaultTarget: true, + }, + { + name: "withoutDefaultTarget", + withDefaultTarget: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // A unary interceptor which blocks the RouteLookup RPC on the fake + // RLS server until the test is done. The first RPC by the client + // will cause the LB policy to send out an RLS request. This will + // also lead to creation of a pending entry, and further RPCs by the + // client should not result in RLS requests being sent out. + rlsReqCh := make(chan struct{}, 1) + interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + rlsReqCh <- struct{}{} + <-ctx.Done() + return nil, ctx.Err() + } + + // Start an RLS server and set the throttler to never throttle. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build RLS service config with an optional default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + if test.withDefaultTarget { + _, defBackendAddress := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + } + + // Register a manual resolver and push the RLS service config + // through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC that results in the RLS request being sent out. And + // since the RLS server is configured to block on the first request, + // this RPC will block until its context expires. 
This ensures that + // we have a pending cache entry for the duration of the test. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + go func() { + client := testgrpc.NewTestServiceClient(cc) + client.EmptyCall(ctx, &testpb.Empty{}) + }() + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Make another RPC and expect it to fail the same way. + ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + makeTestRPCAndVerifyError(ctx, t, cc, codes.DeadlineExceeded, context.DeadlineExceeded) + + // Make sure no RLS request is sent out this time around. + verifyRLSRequest(t, rlsReqCh, false) + }) + } +} + +// Test verifies the scenario where there is a matching entry in the data cache +// which is valid and there is no pending request. The pick is expected to be +// delegated to the child policy. +func (s) TestPick_DataCacheHit_NoPendingEntry_ValidEntry(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build the RLS config without a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + + // Start a test backend, and setup the fake RLS server to return this as a + // target in the RLS response. + testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. 
+ cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Make another RPC and expect it to find the target in the data cache. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure no RLS request is sent out this time around. + verifyRLSRequest(t, rlsReqCh, false) +} + +// Test verifies the scenario where there is a matching entry in the data cache +// which is valid and there is no pending request. The pick is expected to be +// delegated to the child policy. +func (s) TestPick_DataCacheHit_NoPendingEntry_ValidEntry_WithHeaderData(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build the RLS config without a default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + + // Start a test backend which expects the header data contents sent from the + // RLS server to be part of RPC metadata as X-Google-RLS-Data header. 
+ const headerDataContents = "foo,bar,baz" + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + gotHeaderData := metadata.ValueFromIncomingContext(ctx, "x-google-rls-data") + if len(gotHeaderData) != 1 || gotHeaderData[0] != headerDataContents { + return nil, fmt.Errorf("got metadata in `X-Google-RLS-Data` is %v, want %s", gotHeaderData, headerDataContents) + } + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + defer backend.Stop() + + // Setup the fake RLS server to return the above backend as a target in the + // RLS response. Also, populate the header data field in the response. + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{ + Targets: []string{backend.Address}, + HeaderData: headerDataContents, + }} + }) + + // Register a manual resolver and push the RLS service config through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend with the header + // data sent by the RLS server. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := testgrpc.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() RPC: %v", err) + } +} + +// Test verifies the scenario where there is a matching entry in the data cache +// which is stale and there is no pending request. 
The pick is expected to be +// delegated to the child policy with a proactive cache refresh. +func (s) TestPick_DataCacheHit_NoPendingEntry_StaleEntry(t *testing.T) { + // We expect the same pick behavior (i.e delegated to the child policy) for + // a proactive refresh whether the control channel is throttled or not. + tests := []struct { + name string + throttled bool + }{ + { + name: "throttled", + throttled: true, + }, + { + name: "notThrottled", + throttled: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Start an RLS server and setup the throttler appropriately. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + var throttler *fakeThrottler + firstRPCDone := grpcsync.NewEvent() + if test.throttled { + throttler = oneTimeAllowingThrottler(firstRPCDone) + overrideAdaptiveThrottler(t, throttler) + } else { + throttler = neverThrottlingThrottler() + overrideAdaptiveThrottler(t, throttler) + } + + // Build the RLS config without a default target. Set the stale age + // to a very low value to force entries to become stale quickly. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(time.Minute) + rlsConfig.RouteLookupConfig.StaleAge = durationpb.New(defaultTestShortTimeout) + + // Start a test backend, and setup the fake RLS server to return + // this as a target in the RLS response. + testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config + // through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. 
+ cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + firstRPCDone.Fire() + + // The cache entry has a large maxAge, but a small staleAge. We keep + // retrying until the cache entry becomes stale, in which case we expect a + // proactive cache refresh. + // + // If the control channel is not throttled, then we expect an RLS request + // to be sent out. If the control channel is throttled, we expect the fake + // throttler's channel to be signalled. + for { + // Make another RPC and expect it to find the target in the data cache. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + if !test.throttled { + select { + case <-time.After(defaultTestShortTimeout): + // Go back and retry the RPC. + case <-rlsReqCh: + return + } + } else { + select { + case <-time.After(defaultTestShortTimeout): + // Go back and retry the RPC. + case <-throttler.throttleCh: + return + } + } + } + }) + } +} + +// Test verifies scenarios where there is a matching entry in the data cache +// which has expired and there is no pending request. 
+func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntry(t *testing.T) { + tests := []struct { + name string + throttled bool + withDefaultTarget bool + }{ + { + name: "throttledWithDefaultTarget", + throttled: true, + withDefaultTarget: true, + }, + { + name: "throttledWithoutDefaultTarget", + throttled: true, + withDefaultTarget: false, + }, + { + name: "notThrottled", + throttled: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Start an RLS server and setup the throttler appropriately. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + var throttler *fakeThrottler + firstRPCDone := grpcsync.NewEvent() + if test.throttled { + throttler = oneTimeAllowingThrottler(firstRPCDone) + overrideAdaptiveThrottler(t, throttler) + } else { + throttler = neverThrottlingThrottler() + overrideAdaptiveThrottler(t, throttler) + } + + // Build the RLS config with a very low value for maxAge. This will + // ensure that cache entries become invalid very soon. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(defaultTestShortTimeout) + + // Start a default backend if needed. + var defBackendCh chan struct{} + if test.withDefaultTarget { + var defBackendAddress string + defBackendCh, defBackendAddress = startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + } + + // Start a test backend, and setup the fake RLS server to return + // this as a target in the RLS response. + testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config + // through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. 
+ cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + firstRPCDone.Fire() + + // Keep retrying the RPC until the cache entry expires. Expected behavior + // is dependent on the scenario being tested. + switch { + case test.throttled && test.withDefaultTarget: + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, defBackendCh) + <-throttler.throttleCh + case test.throttled && !test.withDefaultTarget: + makeTestRPCAndVerifyError(ctx, t, cc, codes.Unavailable, errRLSThrottled) + <-throttler.throttleCh + case !test.throttled: + for { + // The backend to which the RPC is routed does not change after the + // cache entry expires because the control channel is not throttled. + // So, we need to keep retrying until the cache entry expires, at + // which point we expect an RLS request to be sent out and the RPC to + // get routed to the same testBackend. + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + select { + case <-time.After(defaultTestShortTimeout): + // Go back and retry the RPC. + case <-rlsReqCh: + return + } + } + } + }) + } +} + +// Test verifies scenarios where there is a matching entry in the data cache +// which has expired and is in backoff and there is no pending request. 
+func (s) TestPick_DataCacheHit_NoPendingEntry_ExpiredEntryInBackoff(t *testing.T) { + tests := []struct { + name string + withDefaultTarget bool + }{ + { + name: "withDefaultTarget", + withDefaultTarget: true, + }, + { + name: "withoutDefaultTarget", + withDefaultTarget: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Start an RLS server and set the throttler to never throttle requests. + rlsServer, rlsReqCh := rlstest.SetupFakeRLSServer(t, nil) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Override the backoff strategy to return a large backoff which + // will make sure the data cache entry remains in backoff for the + // duration of the test. + origBackoffStrategy := defaultBackoffStrategy + defaultBackoffStrategy = &fakeBackoffStrategy{backoff: defaultTestTimeout} + defer func() { defaultBackoffStrategy = origBackoffStrategy }() + + // Build the RLS config with a very low value for maxAge. This will + // ensure that cache entries become invalid very soon. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(defaultTestShortTimeout) + + // Start a default backend if needed. + var defBackendCh chan struct{} + if test.withDefaultTarget { + var defBackendAddress string + defBackendCh, defBackendAddress = startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + } + + // Start a test backend, and set up the fake RLS server to return this as + // a target in the RLS response. + testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config through it. 
+ r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + + // Set up the fake RLS server to return errors. This will push the cache + // entry into backoff. + var rlsLastErr = status.Error(codes.DeadlineExceeded, "last RLS request failed") + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Err: rlsLastErr} + }) + + // Since the RLS server is now configured to return errors, this will push + // the cache entry into backoff. The pick will be delegated to the default + // backend if one exists, and will fail with the error returned by the RLS + // server otherwise. + if test.withDefaultTarget { + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, defBackendCh) + } else { + makeTestRPCAndVerifyError(ctx, t, cc, codes.Unavailable, rlsLastErr) + } + }) + } +} + +// Test verifies scenarios where there is a matching entry in the data cache +// which is stale and there is a pending request. +func (s) TestPick_DataCacheHit_PendingEntryExists_StaleEntry(t *testing.T) { + tests := []struct { + name string + withDefaultTarget bool + }{ + { + name: "withDefaultTarget", + withDefaultTarget: true, + }, + { + name: "withoutDefaultTarget", + withDefaultTarget: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // A unary interceptor which simply calls the underlying handler + // until the first client RPC is done. 
We want one client RPC to + // succeed to ensure that a data cache entry is created. For + // subsequent client RPCs which result in RLS requests, this + // interceptor blocks until the test's context expires. And since we + // configure the RLS LB policy with a really low value for max age, + // this allows us to simulate the condition where it has an + // expired entry and a pending entry in the cache. + rlsReqCh := make(chan struct{}, 1) + firstRPCDone := grpcsync.NewEvent() + interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + select { + case rlsReqCh <- struct{}{}: + default: + } + if firstRPCDone.HasFired() { + <-ctx.Done() + return nil, ctx.Err() + } + return handler(ctx, req) + } + + // Start an RLS server and set the throttler to never throttle. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build RLS service config with an optional default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + if test.withDefaultTarget { + _, defBackendAddress := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + } + + // Low value for stale age to force entries to become stale quickly. + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(time.Minute) + rlsConfig.RouteLookupConfig.StaleAge = durationpb.New(defaultTestShortTimeout) + + // Start a test backend, and setup the fake RLS server to return + // this as a target in the RLS response. 
+ testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config + // through it. + r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + firstRPCDone.Fire() + + // The cache entry has a large maxAge, but a small staleAge. We keep + // retrying until the cache entry becomes stale, in which case we expect a + // proactive cache refresh. + for { + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + select { + case <-time.After(defaultTestShortTimeout): + // Go back and retry the RPC. + case <-rlsReqCh: + return + } + } + }) + } +} + +// Test verifies scenarios where there is a matching entry in the data cache +// which is expired and there is a pending request. +func (s) TestPick_DataCacheHit_PendingEntryExists_ExpiredEntry(t *testing.T) { + tests := []struct { + name string + withDefaultTarget bool + }{ + { + name: "withDefaultTarget", + withDefaultTarget: true, + }, + { + name: "withoutDefaultTarget", + withDefaultTarget: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // A unary interceptor which simply calls the underlying handler + // until the first client RPC is done. 
We want one client RPC to + // succeed to ensure that a data cache entry is created. For + // subsequent client RPCs which result in RLS requests, this + // interceptor blocks until the test's context expires. And since we + // configure the RLS LB policy with a really low value for max age, + // this allows us to simulate the condition where it has an + // expired entry and a pending entry in the cache. + rlsReqCh := make(chan struct{}, 1) + firstRPCDone := grpcsync.NewEvent() + interceptor := func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + select { + case rlsReqCh <- struct{}{}: + default: + } + if firstRPCDone.HasFired() { + <-ctx.Done() + return nil, ctx.Err() + } + return handler(ctx, req) + } + + // Start an RLS server and set the throttler to never throttle. + rlsServer, _ := rlstest.SetupFakeRLSServer(t, nil, grpc.UnaryInterceptor(interceptor)) + overrideAdaptiveThrottler(t, neverThrottlingThrottler()) + + // Build RLS service config with an optional default target. + rlsConfig := buildBasicRLSConfigWithChildPolicy(t, t.Name(), rlsServer.Address) + if test.withDefaultTarget { + _, defBackendAddress := startBackend(t) + rlsConfig.RouteLookupConfig.DefaultTarget = defBackendAddress + } + // Set a low value for maxAge to ensure cache entries expire soon. + rlsConfig.RouteLookupConfig.MaxAge = durationpb.New(defaultTestShortTimeout) + + // Start a test backend, and setup the fake RLS server to return + // this as a target in the RLS response. + testBackendCh, testBackendAddress := startBackend(t) + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rlstest.RouteLookupResponse { + return &rlstest.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{testBackendAddress}}} + }) + + // Register a manual resolver and push the RLS service config + // through it. 
+ r := startManualResolverWithConfig(t, rlsConfig) + + // Dial the backend. + cc, err := grpc.Dial(r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Make an RPC and ensure it gets routed to the test backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + makeTestRPCAndExpectItToReachBackend(ctx, t, cc, testBackendCh) + + // Make sure an RLS request is sent out. + verifyRLSRequest(t, rlsReqCh, true) + firstRPCDone.Fire() + + // At this point, we have a cache entry with a small maxAge, and the + // RLS server is configured to block on further RLS requests. As we + // retry the RPC, at some point the cache entry would expire and + // force us to send an RLS request which would block on the server, + // giving us a pending cache entry for the duration of the test. + go func() { + for client := testgrpc.NewTestServiceClient(cc); ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + client.EmptyCall(ctx, &testpb.Empty{}) + } + }() + verifyRLSRequest(t, rlsReqCh, true) + + // Another RPC at this point should find the pending entry and be queued. + // But since we pass a small deadline, this RPC should fail with a + // deadline exceeded error since the pending request does not return until + // the test is done. And since we have a pending entry, we expect no RLS + // request to be sent out. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + makeTestRPCAndVerifyError(sCtx, t, cc, codes.DeadlineExceeded, context.DeadlineExceeded) + verifyRLSRequest(t, rlsReqCh, false) + }) + } +} + +func TestIsFullMethodNameValid(t *testing.T) { + tests := []struct { + desc string + methodName string + want bool + }{ + { + desc: "does not start with a slash", + methodName: "service/method", + want: false, + }, + { + desc: "does not contain a method", + methodName: "/service", + want: false, + }, + { + desc: "path has more elements", + methodName: "/service/path/to/method", + want: false, + }, + { + desc: "valid", + methodName: "/service/method", + want: true, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + if got := isFullMethodNameValid(test.methodName); got != test.want { + t.Fatalf("isFullMethodNameValid(%q) = %v, want %v", test.methodName, got, test.want) + } + }) + } +} diff --git a/balancer/roundrobin/roundrobin.go b/balancer/roundrobin/roundrobin.go index 43c2a15373a1..f7031ad2251b 100644 --- a/balancer/roundrobin/roundrobin.go +++ b/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "sync" + "sync/atomic" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" @@ -47,11 +47,11 @@ func init() { type rrPickerBuilder struct{} func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: newPicker called with info: %v", info) + logger.Infof("roundrobinPicker: Build called with info: %v", info) if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } - var scs []balancer.SubConn + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) for sc := range info.ReadySCs { scs = append(scs, sc) } @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn 
states change, and we don't want to apply excess // load to the first server in the list. - next: grpcrand.Intn(len(scs)), + next: uint32(grpcrand.Intn(len(scs))), } } @@ -69,15 +69,13 @@ type rrPicker struct { // created. The slice is immutable. Each Get() will do a round robin // selection from it and return the selected SubConn. subConns []balancer.SubConn - - mu sync.Mutex - next int + next uint32 } func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - p.mu.Lock() - sc := p.subConns[p.next] - p.next = (p.next + 1) % len(p.subConns) - p.mu.Unlock() + subConnsLen := uint32(len(p.subConns)) + nextIndex := atomic.AddUint32(&p.next, 1) + + sc := p.subConns[nextIndex%subConnsLen] return balancer.PickResult{SubConn: sc}, nil } diff --git a/balancer/roundrobin/roundrobin_test.go b/balancer/roundrobin/roundrobin_test.go deleted file mode 100644 index b89cdb4a30f3..000000000000 --- a/balancer/roundrobin/roundrobin_test.go +++ /dev/null @@ -1,549 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package roundrobin_test - -import ( - "context" - "fmt" - "net" - "strings" - "sync" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/grpctest" - imetadata "google.golang.org/grpc/internal/metadata" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" -) - -const ( - testMDKey = "test-md" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -type testServer struct { - testpb.UnimplementedTestServiceServer - - testMDChan chan []string -} - -func newTestServer() *testServer { - return &testServer{testMDChan: make(chan []string, 1)} -} - -func (s *testServer) EmptyCall(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - md, ok := metadata.FromIncomingContext(ctx) - if ok && len(md[testMDKey]) != 0 { - select { - case s.testMDChan <- md[testMDKey]: - case <-ctx.Done(): - return nil, ctx.Err() - } - } - return &testpb.Empty{}, nil -} - -func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { - return nil -} - -type test struct { - servers []*grpc.Server - serverImpls []*testServer - addresses []string -} - -func (t *test) cleanup() { - for _, s := range t.servers { - s.Stop() - } -} - -func startTestServers(count int) (_ *test, err error) { - t := &test{} - - defer func() { - if err != nil { - t.cleanup() - } - }() - for i := 0; i < count; i++ { - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, fmt.Errorf("failed to listen %v", err) - } - - s := grpc.NewServer() - sImpl := newTestServer() - testpb.RegisterTestServiceServer(s, sImpl) - t.servers = append(t.servers, s) - t.serverImpls = 
append(t.serverImpls, sImpl) - t.addresses = append(t.addresses, lis.Addr().String()) - - go func(s *grpc.Server, l net.Listener) { - s.Serve(l) - }(s, lis) - } - - return t, nil -} - -func (s) TestOneBackend(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - test, err := startTestServers(1) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) - // The second RPC should succeed. - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } -} - -func (s) TestBackendsRoundRobin(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - backendCount := 5 - test, err := startTestServers(backendCount) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. 
- ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - var resolvedAddrs []resolver.Address - for i := 0; i < backendCount; i++ { - resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) - } - - r.UpdateState(resolver.State{Addresses: resolvedAddrs}) - var p peer.Peer - // Make sure connections to all servers are up. - for si := 0; si < backendCount; si++ { - var connected bool - for i := 0; i < 1000; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() == test.addresses[si] { - connected = true - break - } - time.Sleep(time.Millisecond) - } - if !connected { - t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) - } - } - - for i := 0; i < 3*backendCount; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() != test.addresses[i%backendCount] { - t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) - } - } -} - -func (s) TestAddressesRemoved(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - test, err := startTestServers(1) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. 
- ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) - // The second RPC should succeed. - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) - - ctx2, cancel2 := context.WithTimeout(context.Background(), 500*time.Millisecond) - defer cancel2() - // Wait for state to change to transient failure. - for src := cc.GetState(); src != connectivity.TransientFailure; src = cc.GetState() { - if !cc.WaitForStateChange(ctx2, src) { - t.Fatalf("timed out waiting for state change. got %v; want %v", src, connectivity.TransientFailure) - } - } - - const msgWant = "produced zero addresses" - if _, err := testc.EmptyCall(ctx2, &testpb.Empty{}); err == nil || !strings.Contains(status.Convert(err).Message(), msgWant) { - t.Fatalf("EmptyCall() = _, %v, want _, Contains(Message(), %q)", err, msgWant) - } -} - -func (s) TestCloseWithPendingRPC(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - test, err := startTestServers(1) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - testc := testpb.NewTestServiceClient(cc) - - var wg sync.WaitGroup - for i := 0; i < 3; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // This RPC blocks until cc is closed. 
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) == codes.DeadlineExceeded { - t.Errorf("RPC failed because of deadline after cc is closed; want error the client connection is closing") - } - cancel() - }() - } - cc.Close() - wg.Wait() -} - -func (s) TestNewAddressWhileBlocking(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - test, err := startTestServers(1) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) - // The second RPC should succeed. - ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, nil", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) - - var wg sync.WaitGroup - for i := 0; i < 3; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // This RPC blocks until NewAddress is called. 
- testc.EmptyCall(context.Background(), &testpb.Empty{}) - }() - } - time.Sleep(50 * time.Millisecond) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) - wg.Wait() -} - -func (s) TestOneServerDown(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - backendCount := 3 - test, err := startTestServers(backendCount) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - var resolvedAddrs []resolver.Address - for i := 0; i < backendCount; i++ { - resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) - } - - r.UpdateState(resolver.State{Addresses: resolvedAddrs}) - var p peer.Peer - // Make sure connections to all servers are up. 
- for si := 0; si < backendCount; si++ { - var connected bool - for i := 0; i < 1000; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() == test.addresses[si] { - connected = true - break - } - time.Sleep(time.Millisecond) - } - if !connected { - t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) - } - } - - for i := 0; i < 3*backendCount; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() != test.addresses[i%backendCount] { - t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) - } - } - - // Stop one server, RPCs should roundrobin among the remaining servers. - backendCount-- - test.servers[backendCount].Stop() - // Loop until see server[backendCount-1] twice without seeing server[backendCount]. - var targetSeen int - for i := 0; i < 1000; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - targetSeen = 0 - t.Logf("EmptyCall() = _, %v, want _, ", err) - // Due to a race, this RPC could possibly get the connection that - // was closing, and this RPC may fail. Keep trying when this - // happens. - continue - } - switch p.Addr.String() { - case test.addresses[backendCount-1]: - targetSeen++ - case test.addresses[backendCount]: - // Reset targetSeen if peer is server[backendCount]. - targetSeen = 0 - } - // Break to make sure the last picked address is server[-1], so the following for loop won't be flaky. 
- if targetSeen >= 2 { - break - } - } - if targetSeen != 2 { - t.Fatal("Failed to see server[backendCount-1] twice without seeing server[backendCount]") - } - for i := 0; i < 3*backendCount; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() != test.addresses[i%backendCount] { - t.Errorf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) - } - } -} - -func (s) TestAllServersDown(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - backendCount := 3 - test, err := startTestServers(backendCount) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - var resolvedAddrs []resolver.Address - for i := 0; i < backendCount; i++ { - resolvedAddrs = append(resolvedAddrs, resolver.Address{Addr: test.addresses[i]}) - } - - r.UpdateState(resolver.State{Addresses: resolvedAddrs}) - var p peer.Peer - // Make sure connections to all servers are up. 
- for si := 0; si < backendCount; si++ { - var connected bool - for i := 0; i < 1000; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() == test.addresses[si] { - connected = true - break - } - time.Sleep(time.Millisecond) - } - if !connected { - t.Fatalf("Connection to %v was not up after more than 1 second", test.addresses[si]) - } - } - - for i := 0; i < 3*backendCount; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}, grpc.Peer(&p)); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - if p.Addr.String() != test.addresses[i%backendCount] { - t.Fatalf("Index %d: want peer %v, got peer %v", i, test.addresses[i%backendCount], p.Addr.String()) - } - } - - // All servers are stopped, failfast RPC should fail with unavailable. - for i := 0; i < backendCount; i++ { - test.servers[i].Stop() - } - time.Sleep(100 * time.Millisecond) - for i := 0; i < 1000; i++ { - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); status.Code(err) == codes.Unavailable { - return - } - time.Sleep(time.Millisecond) - } - t.Fatalf("Failfast RPCs didn't fail with Unavailable after all servers are stopped") -} - -func (s) TestUpdateAddressAttributes(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - test, err := startTestServers(1) - if err != nil { - t.Fatalf("failed to start servers: %v", err) - } - defer test.cleanup() - - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(roundrobin.Name)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - testc := testpb.NewTestServiceClient(cc) - // The first RPC should fail because there's no address. 
- ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - if _, err := testc.EmptyCall(ctx, &testpb.Empty{}); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: test.addresses[0]}}}) - // The second RPC should succeed. - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - // The second RPC should not set metadata, so there's no md in the channel. - select { - case md1 := <-test.serverImpls[0].testMDChan: - t.Fatalf("got md: %v, want empty metadata", md1) - case <-time.After(time.Microsecond * 100): - } - - const testMDValue = "test-md-value" - // Update metadata in address. - r.UpdateState(resolver.State{Addresses: []resolver.Address{ - imetadata.Set(resolver.Address{Addr: test.addresses[0]}, metadata.Pairs(testMDKey, testMDValue)), - }}) - // The third RPC should succeed. - if _, err := testc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { - t.Fatalf("EmptyCall() = _, %v, want _, ", err) - } - - // The third RPC should send metadata with it. - md2 := <-test.serverImpls[0].testMDChan - if len(md2) == 0 || md2[0] != testMDValue { - t.Fatalf("got md: %v, want %v", md2, []string{testMDValue}) - } -} diff --git a/balancer/weightedroundrobin/balancer.go b/balancer/weightedroundrobin/balancer.go new file mode 100644 index 000000000000..797b9aa0a960 --- /dev/null +++ b/balancer/weightedroundrobin/balancer.go @@ -0,0 +1,537 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/weightedroundrobin/internal" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// Name is the name of the weighted round robin balancer. +const Name = "weighted_round_robin" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &wrrBalancer{ + cc: cc, + subConns: resolver.NewAddressMap(), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + scMap: make(map[balancer.SubConn]*weightedSubConn), + connectivityState: connectivity.Connecting, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + return b +} + +func (bb) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbCfg := &lbConfig{ + // Default values as documented in A58. 
+ OOBReportingPeriod: iserviceconfig.Duration(10 * time.Second), + BlackoutPeriod: iserviceconfig.Duration(10 * time.Second), + WeightExpirationPeriod: iserviceconfig.Duration(3 * time.Minute), + WeightUpdatePeriod: iserviceconfig.Duration(time.Second), + ErrorUtilizationPenalty: 1, + } + if err := json.Unmarshal(js, lbCfg); err != nil { + return nil, fmt.Errorf("wrr: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + + if lbCfg.ErrorUtilizationPenalty < 0 { + return nil, fmt.Errorf("wrr: errorUtilizationPenalty must be non-negative") + } + + // For easier comparisons later, ensure the OOB reporting period is unset + // (0s) when OOB reports are disabled. + if !lbCfg.EnableOOBLoadReport { + lbCfg.OOBReportingPeriod = 0 + } + + // Impose lower bound of 100ms on weightUpdatePeriod. + if !internal.AllowAnyWeightUpdatePeriod && lbCfg.WeightUpdatePeriod < iserviceconfig.Duration(100*time.Millisecond) { + lbCfg.WeightUpdatePeriod = iserviceconfig.Duration(100 * time.Millisecond) + } + + return lbCfg, nil +} + +func (bb) Name() string { + return Name +} + +// wrrBalancer implements the weighted round robin LB policy. +type wrrBalancer struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. 
+ cfg *lbConfig // active config + subConns *resolver.AddressMap // active weightedSubConns mapped by address + scMap map[balancer.SubConn]*weightedSubConn + connectivityState connectivity.State // aggregate state + csEvltr *balancer.ConnectivityStateEvaluator + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure + stopPicker func() +} + +func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + b.logger.Infof("UpdateCCS: %v", ccs) + b.resolverErr = nil + cfg, ok := ccs.BalancerConfig.(*lbConfig) + if !ok { + return fmt.Errorf("wrr: received nil or illegal BalancerConfig (type %T): %v", ccs.BalancerConfig, ccs.BalancerConfig) + } + + b.cfg = cfg + b.updateAddresses(ccs.ResolverState.Addresses) + + if len(ccs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("resolver produced zero addresses")) // will call regeneratePicker + return balancer.ErrBadResolverState + } + + b.regeneratePicker() + + return nil +} + +func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { + addrsSet := resolver.NewAddressMap() + + // Loop through new address list and create subconns for any new addresses. + for _, addr := range addrs { + if _, ok := addrsSet.Get(addr); ok { + // Redundant address; skip. + continue + } + addrsSet.Set(addr, nil) + + var wsc *weightedSubConn + wsci, ok := b.subConns.Get(addr) + if ok { + wsc = wsci.(*weightedSubConn) + } else { + // addr is a new address (not existing in b.subConns). 
+ sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{}) + if err != nil { + b.logger.Warningf("Failed to create new SubConn for address %v: %v", addr, err) + continue + } + wsc = &weightedSubConn{ + SubConn: sc, + logger: b.logger, + connectivityState: connectivity.Idle, + // Initially, we set load reports to off, because they are not + // running upon initial weightedSubConn creation. + cfg: &lbConfig{EnableOOBLoadReport: false}, + } + b.subConns.Set(addr, wsc) + b.scMap[sc] = wsc + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) + sc.Connect() + } + // Update config for existing weightedSubConn or send update for first + // time to new one. Ensures an OOB listener is running if needed + // (and stops the existing one if applicable). + wsc.updateConfig(b.cfg) + } + + // Loop through existing subconns and remove ones that are not in addrs. + for _, addr := range b.subConns.Keys() { + if _, ok := addrsSet.Get(addr); ok { + // Existing address also in new address list; skip. + continue + } + // addr was removed by resolver. Remove. + wsci, _ := b.subConns.Get(addr) + wsc := wsci.(*weightedSubConn) + b.cc.RemoveSubConn(wsc.SubConn) + b.subConns.Delete(addr) + } +} + +func (b *wrrBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.connectivityState = connectivity.TransientFailure + } + if b.connectivityState != connectivity.TransientFailure { + // No need to update the picker since no error is being returned. 
+ return + } + b.regeneratePicker() +} + +func (b *wrrBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + wsc := b.scMap[sc] + if wsc == nil { + b.logger.Errorf("UpdateSubConnState called with an unknown SubConn: %p, %v", sc, state) + return + } + if b.logger.V(2) { + logger.Infof("UpdateSubConnState(%+v, %+v)", sc, state) + } + + cs := state.ConnectivityState + + if cs == connectivity.TransientFailure { + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + if cs == connectivity.Shutdown { + delete(b.scMap, sc) + // The subconn was removed from b.subConns when the address was removed + // in updateAddresses. + } + + oldCS := wsc.updateConnectivityState(cs) + b.connectivityState = b.csEvltr.RecordTransition(oldCS, cs) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (cs == connectivity.Ready) != (oldCS == connectivity.Ready) || + b.connectivityState == connectivity.TransientFailure { + b.regeneratePicker() + } +} + +// Close stops the balancer. It cancels any ongoing scheduler updates and +// stops any ORCA listeners. +func (b *wrrBalancer) Close() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + for _, wsc := range b.scMap { + // Ensure any lingering OOB watchers are stopped. + wsc.updateConnectivityState(connectivity.Shutdown) + } +} + +// ExitIdle is ignored; we always connect to all backends. +func (b *wrrBalancer) ExitIdle() {} + +func (b *wrrBalancer) readySubConns() []*weightedSubConn { + var ret []*weightedSubConn + for _, v := range b.subConns.Values() { + wsc := v.(*weightedSubConn) + if wsc.connectivityState == connectivity.Ready { + ret = append(ret, wsc) + } + } + return ret +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. 
Must only be called if b.connectivityState is +// TransientFailure. +func (b *wrrBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +func (b *wrrBalancer) regeneratePicker() { + if b.stopPicker != nil { + b.stopPicker() + b.stopPicker = nil + } + + switch b.connectivityState { + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(b.mergeErrors()), + }) + return + case connectivity.Connecting, connectivity.Idle: + // Idle could happen very briefly if all subconns are Idle and we've + // asked them to connect but they haven't reported Connecting yet. + // Report the same as Connecting since this is temporary. + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }) + return + case connectivity.Ready: + b.connErr = nil + } + + p := &picker{ + v: grpcrand.Uint32(), // start the scheduler at a random point + cfg: b.cfg, + subConns: b.readySubConns(), + } + var ctx context.Context + ctx, b.stopPicker = context.WithCancel(context.Background()) + p.start(ctx) + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.connectivityState, + Picker: p, + }) +} + +// picker is the WRR policy's picker. It uses live-updating backend weights to +// update the scheduler periodically and ensure picks are routed proportional +// to those weights. 
+type picker struct { + scheduler unsafe.Pointer // *scheduler; accessed atomically + v uint32 // incrementing value used by the scheduler; accessed atomically + cfg *lbConfig // active config when picker created + subConns []*weightedSubConn // all READY subconns +} + +// scWeights returns a slice containing the weights from p.subConns in the same +// order as p.subConns. +func (p *picker) scWeights() []float64 { + ws := make([]float64, len(p.subConns)) + now := internal.TimeNow() + for i, wsc := range p.subConns { + ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod)) + } + return ws +} + +func (p *picker) inc() uint32 { + return atomic.AddUint32(&p.v, 1) +} + +func (p *picker) regenerateScheduler() { + s := newScheduler(p.scWeights(), p.inc) + atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s)) +} + +func (p *picker) start(ctx context.Context) { + p.regenerateScheduler() + if len(p.subConns) == 1 { + // No need to regenerate weights with only one backend. + return + } + go func() { + ticker := time.NewTicker(time.Duration(p.cfg.WeightUpdatePeriod)) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + p.regenerateScheduler() + } + } + }() +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + // Read the scheduler atomically. All scheduler operations are threadsafe, + // and if the scheduler is replaced during this usage, we want to use the + // scheduler that was live when the pick started. 
+ sched := *(*scheduler)(atomic.LoadPointer(&p.scheduler)) + + pickedSC := p.subConns[sched.nextIndex()] + pr := balancer.PickResult{SubConn: pickedSC.SubConn} + if !p.cfg.EnableOOBLoadReport { + pr.Done = func(info balancer.DoneInfo) { + if load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport); ok && load != nil { + pickedSC.OnLoadReport(load) + } + } + } + return pr, nil +} + +// weightedSubConn is the wrapper of a subconn that holds the subconn and its +// weight (and other parameters relevant to computing the effective weight). +// When needed, it also tracks connectivity state, listens for metrics updates +// by implementing the orca.OOBListener interface and manages that listener. +type weightedSubConn struct { + balancer.SubConn + logger *grpclog.PrefixLogger + + // The following fields are only accessed on calls into the LB policy, and + // do not need a mutex. + connectivityState connectivity.State + stopORCAListener func() + + // The following fields are accessed asynchronously and are protected by + // mu. Note that mu may not be held when calling into the stopORCAListener + // or when registering a new listener, as those calls require the ORCA + // producer mu which is held when calling the listener, and the listener + // holds mu. 
+ mu sync.Mutex + weightVal float64 + nonEmptySince time.Time + lastUpdated time.Time + cfg *lbConfig +} + +func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) { + if w.logger.V(2) { + w.logger.Infof("Received load report for subchannel %v: %v", w.SubConn, load) + } + // Update weights of this subchannel according to the reported load + utilization := load.ApplicationUtilization + if utilization == 0 { + utilization = load.CpuUtilization + } + if utilization == 0 || load.RpsFractional == 0 { + if w.logger.V(2) { + w.logger.Infof("Ignoring empty load report for subchannel %v", w.SubConn) + } + return + } + + w.mu.Lock() + defer w.mu.Unlock() + + errorRate := load.Eps / load.RpsFractional + w.weightVal = load.RpsFractional / (utilization + errorRate*w.cfg.ErrorUtilizationPenalty) + if w.logger.V(2) { + w.logger.Infof("New weight for subchannel %v: %v", w.SubConn, w.weightVal) + } + + w.lastUpdated = internal.TimeNow() + if w.nonEmptySince == (time.Time{}) { + w.nonEmptySince = w.lastUpdated + } +} + +// updateConfig updates the parameters of the WRR policy and +// stops/starts/restarts the ORCA OOB listener. +func (w *weightedSubConn) updateConfig(cfg *lbConfig) { + w.mu.Lock() + oldCfg := w.cfg + w.cfg = cfg + w.mu.Unlock() + + newPeriod := cfg.OOBReportingPeriod + if cfg.EnableOOBLoadReport == oldCfg.EnableOOBLoadReport && + newPeriod == oldCfg.OOBReportingPeriod { + // Load reporting wasn't enabled before or after, or load reporting was + // enabled before and after, and had the same period. (Note that with + // load reporting disabled, OOBReportingPeriod is always 0.) + return + } + // (Optionally stop and) start the listener to use the new config's + // settings for OOB reporting. 
+ + if w.stopORCAListener != nil { + w.stopORCAListener() + } + if !cfg.EnableOOBLoadReport { + w.stopORCAListener = nil + return + } + if w.logger.V(2) { + w.logger.Infof("Registering ORCA listener for %v with interval %v", w.SubConn, newPeriod) + } + opts := orca.OOBListenerOptions{ReportInterval: time.Duration(newPeriod)} + w.stopORCAListener = orca.RegisterOOBListener(w.SubConn, w, opts) +} + +func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connectivity.State { + switch cs { + case connectivity.Idle: + // Always reconnect when idle. + w.SubConn.Connect() + case connectivity.Ready: + // If we transition back to READY state, reset nonEmptySince so that we + // apply the blackout period after we start receiving load data. Note + // that we cannot guarantee that we will never receive lingering + // callbacks for backend metric reports from the previous connection + // after the new connection has been established, but they should be + // masked by new backend metric reports from the new connection by the + // time the blackout period ends. + w.mu.Lock() + w.nonEmptySince = time.Time{} + w.mu.Unlock() + case connectivity.Shutdown: + if w.stopORCAListener != nil { + w.stopORCAListener() + } + } + + oldCS := w.connectivityState + + if oldCS == connectivity.TransientFailure && + (cs == connectivity.Connecting || cs == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return oldCS + } + + w.connectivityState = cs + + return oldCS +} + +// weight returns the current effective weight of the subconn, taking into +// account the parameters. Returns 0 for blacked out or expired data, which +// will cause the backend weight to be treated as the mean of the weights of +// the other backends. 
+func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration) float64 { + w.mu.Lock() + defer w.mu.Unlock() + // If the most recent update was longer ago than the expiration period, + // reset nonEmptySince so that we apply the blackout period again if we + // start getting data again in the future, and return 0. + if now.Sub(w.lastUpdated) >= weightExpirationPeriod { + w.nonEmptySince = time.Time{} + return 0 + } + // If we don't have at least blackoutPeriod worth of data, return 0. + if blackoutPeriod != 0 && (w.nonEmptySince == (time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) { + return 0 + } + return w.weightVal +} diff --git a/balancer/weightedroundrobin/balancer_test.go b/balancer/weightedroundrobin/balancer_test.go new file mode 100644 index 000000000000..1d67bcf1f008 --- /dev/null +++ b/balancer/weightedroundrobin/balancer_test.go @@ -0,0 +1,756 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package weightedroundrobin_test + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils/roundrobin" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + + wrr "google.golang.org/grpc/balancer/weightedroundrobin" + iwrr "google.golang.org/grpc/balancer/weightedroundrobin/internal" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const defaultTestTimeout = 10 * time.Second +const weightUpdatePeriod = 50 * time.Millisecond +const weightExpirationPeriod = time.Minute +const oobReportingInterval = 10 * time.Millisecond + +func init() { + iwrr.AllowAnyWeightUpdatePeriod = true +} + +func boolp(b bool) *bool { return &b } +func float64p(f float64) *float64 { return &f } +func stringp(s string) *string { return &s } + +var ( + perCallConfig = iwrr.LBConfig{ + EnableOOBLoadReport: boolp(false), + OOBReportingPeriod: stringp("0.005s"), + BlackoutPeriod: stringp("0s"), + WeightExpirationPeriod: stringp("60s"), + WeightUpdatePeriod: stringp(".050s"), + ErrorUtilizationPenalty: float64p(0), + } + oobConfig = iwrr.LBConfig{ + EnableOOBLoadReport: boolp(true), + OOBReportingPeriod: stringp("0.005s"), + BlackoutPeriod: stringp("0s"), + WeightExpirationPeriod: stringp("60s"), + WeightUpdatePeriod: stringp(".050s"), + ErrorUtilizationPenalty: float64p(0), + } +) + +type testServer struct { + *stubserver.StubServer + + oobMetrics orca.ServerMetricsRecorder // Attached to the OOB stream. + callMetrics orca.CallMetricsRecorder // Attached to per-call metrics. 
+} + +type reportType int + +const ( + reportNone reportType = iota + reportOOB + reportCall + reportBoth +) + +func startServer(t *testing.T, r reportType) *testServer { + t.Helper() + + smr := orca.NewServerMetricsRecorder() + cmr := orca.NewServerMetricsRecorder().(orca.CallMetricsRecorder) + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if r := orca.CallMetricsRecorderFromContext(ctx); r != nil { + // Copy metrics from what the test set in cmr into r. + sm := cmr.(orca.ServerMetricsProvider).ServerMetrics() + r.SetApplicationUtilization(sm.AppUtilization) + r.SetQPS(sm.QPS) + r.SetEPS(sm.EPS) + } + return &testpb.Empty{}, nil + }, + } + + var sopts []grpc.ServerOption + if r == reportCall || r == reportBoth { + sopts = append(sopts, orca.CallMetricsServerOption(nil)) + } + + if r == reportOOB || r == reportBoth { + oso := orca.ServiceOptions{ + ServerMetricsProvider: smr, + MinReportingInterval: 10 * time.Millisecond, + } + internal.ORCAAllowAnyMinReportingInterval.(func(so *orca.ServiceOptions))(&oso) + sopts = append(sopts, stubserver.RegisterServiceServerOption(func(s *grpc.Server) { + if err := orca.Register(s, oso); err != nil { + t.Fatalf("Failed to register orca service: %v", err) + } + })) + } + + if err := ss.StartServer(sopts...); err != nil { + t.Fatalf("Error starting server: %v", err) + } + t.Cleanup(ss.Stop) + + return &testServer{ + StubServer: ss, + oobMetrics: smr, + callMetrics: cmr, + } +} + +func svcConfig(t *testing.T, wrrCfg iwrr.LBConfig) string { + t.Helper() + m, err := json.Marshal(wrrCfg) + if err != nil { + t.Fatalf("Error marshaling JSON %v: %v", wrrCfg, err) + } + sc := fmt.Sprintf(`{"loadBalancingConfig": [ {%q:%v} ] }`, wrr.Name, string(m)) + t.Logf("Marshaled service config: %v", sc) + return sc +} + +// Tests basic functionality with one address. With only one address, load +// reporting doesn't affect routing at all. 
+func (s) TestBalancer_OneAddress(t *testing.T) { + testCases := []struct { + rt reportType + cfg iwrr.LBConfig + }{ + {rt: reportNone, cfg: perCallConfig}, + {rt: reportCall, cfg: perCallConfig}, + {rt: reportOOB, cfg: oobConfig}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("reportType:%v", tc.rt), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv := startServer(t, tc.rt) + + sc := svcConfig(t, tc.cfg) + if err := srv.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + + // Perform many RPCs to ensure the LB policy works with 1 address. + for i := 0; i < 100; i++ { + srv.callMetrics.SetQPS(float64(i)) + srv.oobMetrics.SetQPS(float64(i)) + if _, err := srv.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("Error from EmptyCall: %v", err) + } + time.Sleep(time.Millisecond) // Delay; test will run 100ms and should perform ~10 weight updates + } + }) + } +} + +// Tests two addresses with ORCA reporting disabled (should fall back to pure +// RR). +func (s) TestBalancer_TwoAddresses_ReportingDisabled(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportNone) + srv2 := startServer(t, reportNone) + + sc := svcConfig(t, perCallConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Perform many RPCs to ensure the LB policy works with 2 addresses. + for i := 0; i < 20; i++ { + roundrobin.CheckRoundRobinRPCs(ctx, srv1.Client, addrs) + } +} + +// Tests two addresses with per-call ORCA reporting enabled. Checks the +// backends are called in the appropriate ratios. 
+func (s) TestBalancer_TwoAddresses_ReportingEnabledPerCall(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportCall) + srv2 := startServer(t, reportCall) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). + srv1.callMetrics.SetQPS(10.0) + srv1.callMetrics.SetApplicationUtilization(1.0) + + srv2.callMetrics.SetQPS(10.0) + srv2.callMetrics.SetApplicationUtilization(.1) + + sc := svcConfig(t, perCallConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) +} + +// Tests two addresses with OOB ORCA reporting enabled. Checks the backends +// are called in the appropriate ratios. 
+func (s) TestBalancer_TwoAddresses_ReportingEnabledOOB(t *testing.T) { + testCases := []struct { + name string + utilSetter func(orca.ServerMetricsRecorder, float64) + }{{ + name: "application_utilization", + utilSetter: func(smr orca.ServerMetricsRecorder, val float64) { + smr.SetApplicationUtilization(val) + }, + }, { + name: "cpu_utilization", + utilSetter: func(smr orca.ServerMetricsRecorder, val float64) { + smr.SetCPUUtilization(val) + }, + }, { + name: "application over cpu", + utilSetter: func(smr orca.ServerMetricsRecorder, val float64) { + smr.SetApplicationUtilization(val) + smr.SetCPUUtilization(2.0) // ignored because ApplicationUtilization is set + }, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportOOB) + srv2 := startServer(t, reportOOB) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). + srv1.oobMetrics.SetQPS(10.0) + tc.utilSetter(srv1.oobMetrics, 1.0) + + srv2.oobMetrics.SetQPS(10.0) + tc.utilSetter(srv2.oobMetrics, 0.1) + + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + }) + } +} + +// Tests two addresses with OOB ORCA reporting enabled, where the reports +// change over time. Checks the backends are called in the appropriate ratios +// before and after modifying the reports. 
+func (s) TestBalancer_TwoAddresses_UpdateLoads(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportOOB) + srv2 := startServer(t, reportOOB) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetApplicationUtilization(.1) + + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + + // Update the loads so srv2 is loaded and srv1 is not; ensure RPCs are + // routed disproportionately to srv1. + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetApplicationUtilization(.1) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetApplicationUtilization(1.0) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod + oobReportingInterval) + checkWeights(ctx, t, srvWeight{srv1, 10}, srvWeight{srv2, 1}) +} + +// Tests two addresses with OOB ORCA reporting enabled, then with switching to +// per-call reporting. Checks the backends are called in the appropriate +// ratios before and after the change. 
+func (s) TestBalancer_TwoAddresses_OOBThenPerCall(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportBoth) + srv2 := startServer(t, reportBoth) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetApplicationUtilization(.1) + + // For per-call metrics (not used initially), srv2 reports that it is + // loaded and srv1 reports low load. After confirming OOB works, switch to + // per-call and confirm the new routing weights are applied. + srv1.callMetrics.SetQPS(10.0) + srv1.callMetrics.SetApplicationUtilization(.1) + + srv2.callMetrics.SetQPS(10.0) + srv2.callMetrics.SetApplicationUtilization(1.0) + + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + + // Update to per-call weights. + c := svcConfig(t, perCallConfig) + parsedCfg := srv1.R.CC.ParseServiceConfig(c) + if parsedCfg.Err != nil { + panic(fmt.Sprintf("Error parsing config %q: %v", c, parsedCfg.Err)) + } + srv1.R.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parsedCfg}) + + // Wait for the weight update period to allow the new weights to be processed. 
+ time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 10}, srvWeight{srv2, 1}) +} + +// Tests two addresses with OOB ORCA reporting enabled and a non-zero error +// penalty applied. +func (s) TestBalancer_TwoAddresses_ErrorPenalty(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportOOB) + srv2 := startServer(t, reportOOB) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). EPS values are set (but ignored + // initially due to ErrorUtilizationPenalty=0). Later EUP will be updated + // to 0.9 which will cause the weights to be equal and RPCs to be routed + // 50/50. + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) + srv1.oobMetrics.SetEPS(0) + // srv1 weight before: 10.0 / 1.0 = 10.0 + // srv1 weight after: 10.0 / 1.0 = 10.0 + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetApplicationUtilization(.1) + srv2.oobMetrics.SetEPS(10.0) + // srv2 weight before: 10.0 / 0.1 = 100.0 + // srv2 weight after: 10.0 / 1.0 = 10.0 + + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + + // Update to include an error penalty in the weights. 
+ newCfg := oobConfig + newCfg.ErrorUtilizationPenalty = float64p(0.9) + c := svcConfig(t, newCfg) + parsedCfg := srv1.R.CC.ParseServiceConfig(c) + if parsedCfg.Err != nil { + panic(fmt.Sprintf("Error parsing config %q: %v", c, parsedCfg.Err)) + } + srv1.R.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parsedCfg}) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod + oobReportingInterval) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 1}) +} + +// Tests that the blackout period causes backends to use 0 as their weight +// (meaning to use the average weight) until the blackout period elapses. +func (s) TestBalancer_TwoAddresses_BlackoutPeriod(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + var mu sync.Mutex + start := time.Now() + now := start + setNow := func(t time.Time) { + mu.Lock() + defer mu.Unlock() + now = t + } + + setTimeNow(func() time.Time { + mu.Lock() + defer mu.Unlock() + return now + }) + t.Cleanup(func() { setTimeNow(time.Now) }) + + testCases := []struct { + blackoutPeriodCfg *string + blackoutPeriod time.Duration + }{{ + blackoutPeriodCfg: stringp("1s"), + blackoutPeriod: time.Second, + }, { + blackoutPeriodCfg: nil, + blackoutPeriod: 10 * time.Second, // the default + }} + for _, tc := range testCases { + setNow(start) + srv1 := startServer(t, reportOOB) + srv2 := startServer(t, reportOOB) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). 
+ srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetApplicationUtilization(.1) + + cfg := oobConfig + cfg.BlackoutPeriod = tc.blackoutPeriodCfg + sc := svcConfig(t, cfg) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + // During the blackout period (1s) we should route roughly 50/50. + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 1}) + + // Advance time to right before the blackout period ends and the weights + // should still be zero. + setNow(start.Add(tc.blackoutPeriod - time.Nanosecond)) + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 1}) + + // Advance time to right after the blackout period ends and the weights + // should now activate. + setNow(start.Add(tc.blackoutPeriod)) + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + } +} + +// Tests that the weight expiration period causes backends to use 0 as their +// weight (meaning to use the average weight) once the expiration period +// elapses. 
+func (s) TestBalancer_TwoAddresses_WeightExpiration(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + var mu sync.Mutex + start := time.Now() + now := start + setNow := func(t time.Time) { + mu.Lock() + defer mu.Unlock() + now = t + } + setTimeNow(func() time.Time { + mu.Lock() + defer mu.Unlock() + return now + }) + t.Cleanup(func() { setTimeNow(time.Now) }) + + srv1 := startServer(t, reportBoth) + srv2 := startServer(t, reportBoth) + + // srv1 starts loaded and srv2 starts without load; ensure RPCs are routed + // disproportionately to srv2 (10:1). Because the OOB reporting interval + // is 1 minute but the weights expire in 1 second, routing will go to 50/50 + // after the weights expire. + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) + + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetApplicationUtilization(.1) + + cfg := oobConfig + cfg.OOBReportingPeriod = stringp("60s") + sc := svcConfig(t, cfg) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. + ensureReached(ctx, t, srv1.Client, 2) + + // Wait for the weight update period to allow the new weights to be processed. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + + // Advance what time.Now returns to the weight expiration time minus 1s to + // ensure all weights are still honored. + setNow(start.Add(weightExpirationPeriod - time.Second)) + + // Wait for the weight update period to allow the new weights to be processed. 
+ time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}) + + // Advance what time.Now returns to the weight expiration time plus 1s to + // ensure all weights expired and addresses are routed evenly. + setNow(start.Add(weightExpirationPeriod + time.Second)) + + // Wait for the weight expiration period so the weights have expired. + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 1}) +} + +// Tests logic surrounding subchannel management. +func (s) TestBalancer_AddressesChanging(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + srv1 := startServer(t, reportBoth) + srv2 := startServer(t, reportBoth) + srv3 := startServer(t, reportBoth) + srv4 := startServer(t, reportBoth) + + // srv1: weight 10 + srv1.oobMetrics.SetQPS(10.0) + srv1.oobMetrics.SetApplicationUtilization(1.0) + // srv2: weight 100 + srv2.oobMetrics.SetQPS(10.0) + srv2.oobMetrics.SetApplicationUtilization(.1) + // srv3: weight 20 + srv3.oobMetrics.SetQPS(20.0) + srv3.oobMetrics.SetApplicationUtilization(1.0) + // srv4: weight 200 + srv4.oobMetrics.SetQPS(20.0) + srv4.oobMetrics.SetApplicationUtilization(.1) + + sc := svcConfig(t, oobConfig) + if err := srv1.StartClient(grpc.WithDefaultServiceConfig(sc)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + srv2.Client = srv1.Client + addrs := []resolver.Address{{Addr: srv1.Address}, {Addr: srv2.Address}, {Addr: srv3.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + + // Call each backend once to ensure the weights have been received. 
+ ensureReached(ctx, t, srv1.Client, 3) + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}, srvWeight{srv3, 2}) + + // Add backend 4 + addrs = append(addrs, resolver.Address{Addr: srv4.Address}) + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}, srvWeight{srv3, 2}, srvWeight{srv4, 20}) + + // Shutdown backend 3. RPCs will no longer be routed to it. + srv3.Stop() + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv2, 10}, srvWeight{srv4, 20}) + + // Remove addresses 2 and 3. RPCs will no longer be routed to 2 either. + addrs = []resolver.Address{{Addr: srv1.Address}, {Addr: srv4.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv1, 1}, srvWeight{srv4, 20}) + + // Re-add 2 and remove the rest. + addrs = []resolver.Address{{Addr: srv2.Address}} + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv2, 10}) + + // Re-add 4. + addrs = append(addrs, resolver.Address{Addr: srv4.Address}) + srv1.R.UpdateState(resolver.State{Addresses: addrs}) + time.Sleep(weightUpdatePeriod) + checkWeights(ctx, t, srvWeight{srv2, 10}, srvWeight{srv4, 20}) +} + +func ensureReached(ctx context.Context, t *testing.T, c testgrpc.TestServiceClient, n int) { + t.Helper() + reached := make(map[string]struct{}) + for len(reached) != n { + var peer peer.Peer + if _, err := c.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + t.Fatalf("Error from EmptyCall: %v", err) + } + reached[peer.Addr.String()] = struct{}{} + } +} + +type srvWeight struct { + srv *testServer + w int +} + +const rrIterations = 100 + +// checkWeights does rrIterations RPCs and expects the different backends to be +// routed in a ratio as determined by the srvWeights passed in. 
Allows for +// some variance (+/- 2 RPCs per backend). +func checkWeights(ctx context.Context, t *testing.T, sws ...srvWeight) { + t.Helper() + + c := sws[0].srv.Client + + // Replace the weights with approximate counts of RPCs wanted given the + // iterations performed. + weightSum := 0 + for _, sw := range sws { + weightSum += sw.w + } + for i := range sws { + sws[i].w = rrIterations * sws[i].w / weightSum + } + + for attempts := 0; attempts < 10; attempts++ { + serverCounts := make(map[string]int) + for i := 0; i < rrIterations; i++ { + var peer peer.Peer + if _, err := c.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + t.Fatalf("Error from EmptyCall: %v; timed out waiting for weighted RR behavior?", err) + } + serverCounts[peer.Addr.String()]++ + } + if len(serverCounts) != len(sws) { + continue + } + success := true + for _, sw := range sws { + c := serverCounts[sw.srv.Address] + if c < sw.w-2 || c > sw.w+2 { + success = false + break + } + } + if success { + t.Logf("Passed iteration %v; counts: %v", attempts, serverCounts) + return + } + t.Logf("Failed iteration %v; counts: %v; want %+v", attempts, serverCounts, sws) + time.Sleep(5 * time.Millisecond) + } + t.Fatalf("Failed to route RPCs with proper ratio") +} + +func init() { + setTimeNow(time.Now) + iwrr.TimeNow = timeNow +} + +var timeNowFunc atomic.Value // func() time.Time + +func timeNow() time.Time { + return timeNowFunc.Load().(func() time.Time)() +} + +func setTimeNow(f func() time.Time) { + timeNowFunc.Store(f) +} diff --git a/balancer/weightedroundrobin/config.go b/balancer/weightedroundrobin/config.go new file mode 100644 index 000000000000..38f89d32fb43 --- /dev/null +++ b/balancer/weightedroundrobin/config.go @@ -0,0 +1,59 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // Whether to enable out-of-band utilization reporting collection from the + // endpoints. By default, per-request utilization reporting is used. + EnableOOBLoadReport bool `json:"enableOobLoadReport,omitempty"` + + // Load reporting interval to request from the server. Note that the + // server may not provide reports as frequently as the client requests. + // Used only when enable_oob_load_report is true. Default is 10 seconds. + OOBReportingPeriod iserviceconfig.Duration `json:"oobReportingPeriod,omitempty"` + + // A given endpoint must report load metrics continuously for at least this + // long before the endpoint weight will be used. This avoids churn when + // the set of endpoint addresses changes. Takes effect both immediately + // after we establish a connection to an endpoint and after + // weight_expiration_period has caused us to stop using the most recent + // load metrics. Default is 10 seconds. + BlackoutPeriod iserviceconfig.Duration `json:"blackoutPeriod,omitempty"` + + // If a given endpoint has not reported load metrics in this long, + // then we stop using the reported weight. This ensures that we do + // not continue to use very stale weights. Once we stop using a stale + // value, if we later start seeing fresh reports again, the + // blackout_period applies. Defaults to 3 minutes. 
+ WeightExpirationPeriod iserviceconfig.Duration `json:"weightExpirationPeriod,omitempty"` + + // How often endpoint weights are recalculated. Default is 1 second. + WeightUpdatePeriod iserviceconfig.Duration `json:"weightUpdatePeriod,omitempty"` + + // The multiplier used to adjust endpoint weights with the error rate + // calculated as eps/qps. Default is 1.0. + ErrorUtilizationPenalty float64 `json:"errorUtilizationPenalty,omitempty"` +} diff --git a/balancer/weightedroundrobin/internal/internal.go b/balancer/weightedroundrobin/internal/internal.go new file mode 100644 index 000000000000..7b64fbf4e574 --- /dev/null +++ b/balancer/weightedroundrobin/internal/internal.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal allows for easier testing of the weightedroundrobin +// package. +package internal + +import ( + "time" +) + +// AllowAnyWeightUpdatePeriod permits any setting of WeightUpdatePeriod for +// testing. Normally a minimum of 100ms is applied. +var AllowAnyWeightUpdatePeriod bool + +// LBConfig allows tests to produce a JSON form of the config from the struct +// instead of using a string. 
+type LBConfig struct { + EnableOOBLoadReport *bool `json:"enableOobLoadReport,omitempty"` + OOBReportingPeriod *string `json:"oobReportingPeriod,omitempty"` + BlackoutPeriod *string `json:"blackoutPeriod,omitempty"` + WeightExpirationPeriod *string `json:"weightExpirationPeriod,omitempty"` + WeightUpdatePeriod *string `json:"weightUpdatePeriod,omitempty"` + ErrorUtilizationPenalty *float64 `json:"errorUtilizationPenalty,omitempty"` +} + +// TimeNow can be overridden by tests to return a different value for the +// current time. +var TimeNow = time.Now diff --git a/credentials/tls/certprovider/meshca/logging.go b/balancer/weightedroundrobin/logging.go similarity index 81% rename from credentials/tls/certprovider/meshca/logging.go rename to balancer/weightedroundrobin/logging.go index ae20059c4f72..43184ca9ab91 100644 --- a/credentials/tls/certprovider/meshca/logging.go +++ b/balancer/weightedroundrobin/logging.go @@ -1,8 +1,6 @@ -// +build go1.13 - /* * - * Copyright 2020 gRPC authors. + * Copyright 2023 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +16,7 @@ * */ -package meshca +package weightedroundrobin import ( "fmt" @@ -29,8 +27,8 @@ import ( const prefix = "[%p] " -var logger = grpclog.Component("meshca") +var logger = grpclog.Component("weighted-round-robin") -func prefixLogger(p *providerPlugin) *internalgrpclog.PrefixLogger { +func prefixLogger(p *wrrBalancer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) } diff --git a/balancer/weightedroundrobin/scheduler.go b/balancer/weightedroundrobin/scheduler.go new file mode 100644 index 000000000000..e19428112e1e --- /dev/null +++ b/balancer/weightedroundrobin/scheduler.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedroundrobin + +import ( + "math" +) + +type scheduler interface { + nextIndex() int +} + +// newScheduler uses scWeights to create a new scheduler for selecting subconns +// in a picker. It will return a round robin implementation if at least +// len(scWeights)-1 are zero or there is only a single subconn, otherwise it +// will return an Earliest Deadline First (EDF) scheduler implementation that +// selects the subchannels according to their weights. +func newScheduler(scWeights []float64, inc func() uint32) scheduler { + n := len(scWeights) + if n == 0 { + return nil + } + if n == 1 { + return &rrScheduler{numSCs: 1, inc: inc} + } + sum := float64(0) + numZero := 0 + max := float64(0) + for _, w := range scWeights { + sum += w + if w > max { + max = w + } + if w == 0 { + numZero++ + } + } + if numZero >= n-1 { + return &rrScheduler{numSCs: uint32(n), inc: inc} + } + unscaledMean := sum / float64(n-numZero) + scalingFactor := maxWeight / max + mean := uint16(math.Round(scalingFactor * unscaledMean)) + + weights := make([]uint16, n) + allEqual := true + for i, w := range scWeights { + if w == 0 { + // Backends with weight = 0 use the mean. 
+ weights[i] = mean + } else { + scaledWeight := uint16(math.Round(scalingFactor * w)) + weights[i] = scaledWeight + if scaledWeight != mean { + allEqual = false + } + } + } + + if allEqual { + return &rrScheduler{numSCs: uint32(n), inc: inc} + } + + logger.Infof("using edf scheduler with weights: %v", weights) + return &edfScheduler{weights: weights, inc: inc} +} + +const maxWeight = math.MaxUint16 + +// edfScheduler implements EDF using the same algorithm as grpc-c++ here: +// +// https://github.com/grpc/grpc/blob/master/src/core/ext/filters/client_channel/lb_policy/weighted_round_robin/static_stride_scheduler.cc +type edfScheduler struct { + inc func() uint32 + weights []uint16 +} + +// Returns the index in s.weights for the picker to choose. +func (s *edfScheduler) nextIndex() int { + const offset = maxWeight / 2 + + for { + idx := uint64(s.inc()) + + // The sequence number (idx) is split in two: the lower %n gives the + // index of the backend, and the rest gives the number of times we've + // iterated through all backends. `generation` is used to + // deterministically decide whether we pick or skip the backend on this + // iteration, in proportion to the backend's weight. + + backendIndex := idx % uint64(len(s.weights)) + generation := idx / uint64(len(s.weights)) + weight := uint64(s.weights[backendIndex]) + + // We pick a backend `weight` times per `maxWeight` generations. The + // multiply and modulus ~evenly spread out the picks for a given + // backend between different generations. The offset by `backendIndex` + // helps to reduce the chance of multiple consecutive non-picks: if we + // have two consecutive backends with an equal, say, 80% weight of the + // max, with no offset we would see 1/5 generations that skipped both. + // TODO(b/190488683): add test for offset efficacy. 
+ mod := uint64(weight*generation+backendIndex*offset) % maxWeight + + if mod < maxWeight-weight { + continue + } + return int(backendIndex) + } +} + +// A simple RR scheduler to use for fallback when fewer than two backends have +// non-zero weights, or all backends have the same weight, or when only one +// subconn exists. +type rrScheduler struct { + inc func() uint32 + numSCs uint32 +} + +func (s *rrScheduler) nextIndex() int { + idx := s.inc() + return int(idx % s.numSCs) +} diff --git a/balancer/weightedroundrobin/weightedroundrobin.go b/balancer/weightedroundrobin/weightedroundrobin.go index 4b7d3bfedff2..7567462e023d 100644 --- a/balancer/weightedroundrobin/weightedroundrobin.go +++ b/balancer/weightedroundrobin/weightedroundrobin.go @@ -16,46 +16,54 @@ * */ -// Package weightedroundrobin defines a weighted roundrobin balancer. +// Package weightedroundrobin provides an implementation of the weighted round +// robin LB policy, as defined in [gRFC A58]. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +// +// [gRFC A58]: https://github.com/grpc/proposal/blob/master/A58-client-side-weighted-round-robin-lb-policy.md package weightedroundrobin import ( + "fmt" + "google.golang.org/grpc/resolver" ) -// Name is the name of weighted_round_robin balancer. -const Name = "weighted_round_robin" - -// attributeKey is the type used as the key to store AddrInfo in the Attributes -// field of resolver.Address. +// attributeKey is the type used as the key to store AddrInfo in the +// BalancerAttributes field of resolver.Address. type attributeKey struct{} -// AddrInfo will be stored inside Address metadata in order to use weighted -// roundrobin balancer. +// AddrInfo will be stored in the BalancerAttributes field of Address in order +// to use weighted roundrobin balancer. 
type AddrInfo struct { Weight uint32 } -// SetAddrInfo returns a copy of addr in which the Attributes field is updated -// with addrInfo. -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Equal allows the values to be compared by Attributes.Equal. +func (a AddrInfo) Equal(o interface{}) bool { + oa, ok := o.(AddrInfo) + return ok && oa.Weight == a.Weight +} + +// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is +// updated with addrInfo. func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(attributeKey{}, addrInfo) + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) return addr } -// GetAddrInfo returns the AddrInfo stored in the Attributes fields of addr. -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// GetAddrInfo returns the AddrInfo stored in the BalancerAttributes field of +// addr. 
func GetAddrInfo(addr resolver.Address) AddrInfo { - v := addr.Attributes.Value(attributeKey{}) + v := addr.BalancerAttributes.Value(attributeKey{}) ai, _ := v.(AddrInfo) return ai } + +func (a AddrInfo) String() string { + return fmt.Sprintf("Weight: %d", a.Weight) +} diff --git a/balancer/weightedroundrobin/weightedwoundrobin_test.go b/balancer/weightedroundrobin/weightedroundrobin_test.go similarity index 97% rename from balancer/weightedroundrobin/weightedwoundrobin_test.go rename to balancer/weightedroundrobin/weightedroundrobin_test.go index aa46c449a13d..d83619da2e6a 100644 --- a/balancer/weightedroundrobin/weightedwoundrobin_test.go +++ b/balancer/weightedroundrobin/weightedroundrobin_test.go @@ -73,7 +73,7 @@ func TestAddrInfoToAndFromAttributes(t *testing.T) { } func TestGetAddInfoEmpty(t *testing.T) { - addr := resolver.Address{Attributes: attributes.New()} + addr := resolver.Address{} gotAddrInfo := GetAddrInfo(addr) wantAddrInfo := AddrInfo{} if !cmp.Equal(gotAddrInfo, wantAddrInfo) { diff --git a/xds/internal/balancer/weightedtarget/logging.go b/balancer/weightedtarget/logging.go similarity index 100% rename from xds/internal/balancer/weightedtarget/logging.go rename to balancer/weightedtarget/logging.go diff --git a/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go b/balancer/weightedtarget/weightedaggregator/aggregator.go similarity index 66% rename from xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go rename to balancer/weightedtarget/weightedaggregator/aggregator.go index 6c36e2a69cd9..27279257ed13 100644 --- a/xds/internal/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -57,6 +57,8 @@ type Aggregator struct { logger *grpclog.PrefixLogger newWRR func() wrr.WRR + csEvltr *balancer.ConnectivityStateEvaluator + mu sync.Mutex // If started is false, no updates should be sent to the parent cc. 
A closed // sub-balancer could still send pickers to this aggregator. This makes sure @@ -68,6 +70,11 @@ type Aggregator struct { // // If an ID is not in map, it's either removed or never added. idToPickerState map[string]*weightedPickerState + // Set when UpdateState call propagation is paused. + pauseUpdateState bool + // Set when UpdateState call propagation is paused and an UpdateState call + // is suppressed. + needUpdateStateOnResume bool } // New creates a new weighted balancer state aggregator. @@ -76,11 +83,12 @@ func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr cc: cc, logger: logger, newWRR: newWRR, + csEvltr: &balancer.ConnectivityStateEvaluator{}, idToPickerState: make(map[string]*weightedPickerState), } } -// Start starts the aggregator. It can be called after Close to restart the +// Start starts the aggregator. It can be called after Stop to restart the // aggretator. func (wbsa *Aggregator) Start() { wbsa.mu.Lock() @@ -88,7 +96,7 @@ func (wbsa *Aggregator) Start() { wbsa.started = true } -// Stop stops the aggregator. When the aggregator is closed, it won't call +// Stop stops the aggregator. When the aggregator is stopped, it won't call // parent ClientConn to update balancer state. func (wbsa *Aggregator) Stop() { wbsa.mu.Lock() @@ -113,6 +121,9 @@ func (wbsa *Aggregator) Add(id string, weight uint32) { }, stateToAggregate: connectivity.Connecting, } + wbsa.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Connecting) + + wbsa.buildAndUpdateLocked() } // Remove removes the sub-balancer state. Future updates from this sub-balancer, @@ -123,9 +134,14 @@ func (wbsa *Aggregator) Remove(id string) { if _, ok := wbsa.idToPickerState[id]; !ok { return } + // Setting the state of the deleted sub-balancer to Shutdown will get csEvltr + // to remove the previous state for any aggregated state evaluations. + // transitions to and from connectivity.Shutdown are ignored by csEvltr. 
+ wbsa.csEvltr.RecordTransition(wbsa.idToPickerState[id].stateToAggregate, connectivity.Shutdown) // Remove id and picker from picker map. This also results in future updates // for this ID to be ignored. delete(wbsa.idToPickerState, id) + wbsa.buildAndUpdateLocked() } // UpdateWeight updates the weight for the given id. Note that this doesn't @@ -141,6 +157,35 @@ func (wbsa *Aggregator) UpdateWeight(id string, newWeight uint32) { pState.weight = newWeight } +// PauseStateUpdates causes UpdateState calls to not propagate to the parent +// ClientConn. The last state will be remembered and propagated when +// ResumeStateUpdates is called. +func (wbsa *Aggregator) PauseStateUpdates() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.pauseUpdateState = true + wbsa.needUpdateStateOnResume = false +} + +// ResumeStateUpdates will resume propagating UpdateState calls to the parent, +// and call UpdateState on the parent if any UpdateState call was suppressed. +func (wbsa *Aggregator) ResumeStateUpdates() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.pauseUpdateState = false + if wbsa.needUpdateStateOnResume { + wbsa.cc.UpdateState(wbsa.build()) + } +} + +// NeedUpdateStateOnResume sets the UpdateStateOnResume bool to true, letting a +// picker update be sent once ResumeStateUpdates is called. +func (wbsa *Aggregator) NeedUpdateStateOnResume() { + wbsa.mu.Lock() + defer wbsa.mu.Unlock() + wbsa.needUpdateStateOnResume = true +} + // UpdateState is called to report a balancer state change from sub-balancer. // It's usually called by the balancer group. // @@ -148,25 +193,24 @@ func (wbsa *Aggregator) UpdateWeight(id string, newWeight uint32) { func (wbsa *Aggregator) UpdateState(id string, newState balancer.State) { wbsa.mu.Lock() defer wbsa.mu.Unlock() - oldState, ok := wbsa.idToPickerState[id] + state, ok := wbsa.idToPickerState[id] if !ok { // All state starts with an entry in pickStateMap. If ID is not in map, // it's either removed, or never existed. 
return } - if !(oldState.state.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting) { + + if !(state.state.ConnectivityState == connectivity.TransientFailure && newState.ConnectivityState == connectivity.Connecting) { // If old state is TransientFailure, and new state is Connecting, don't // update the state, to prevent the aggregated state from being always // CONNECTING. Otherwise, stateToAggregate is the same as // state.ConnectivityState. - oldState.stateToAggregate = newState.ConnectivityState + wbsa.csEvltr.RecordTransition(state.stateToAggregate, newState.ConnectivityState) + state.stateToAggregate = newState.ConnectivityState } - oldState.state = newState + state.state = newState - if !wbsa.started { - return - } - wbsa.cc.UpdateState(wbsa.build()) + wbsa.buildAndUpdateLocked() } // clearState Reset everything to init state (Connecting) but keep the entry in @@ -183,14 +227,21 @@ func (wbsa *Aggregator) clearStates() { } } -// BuildAndUpdate combines the sub-state from each sub-balancer into one state, -// and update it to parent ClientConn. -func (wbsa *Aggregator) BuildAndUpdate() { - wbsa.mu.Lock() - defer wbsa.mu.Unlock() +// buildAndUpdateLocked aggregates the connectivity states of the sub-balancers, +// builds a new picker and sends an update to the parent ClientConn. +// +// Caller must hold wbsa.mu. +func (wbsa *Aggregator) buildAndUpdateLocked() { if !wbsa.started { return } + if wbsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + wbsa.needUpdateStateOnResume = true + return + } + wbsa.cc.UpdateState(wbsa.build()) } @@ -199,39 +250,34 @@ func (wbsa *Aggregator) BuildAndUpdate() { // Caller must hold wbsa.mu. 
func (wbsa *Aggregator) build() balancer.State { wbsa.logger.Infof("Child pickers with config: %+v", wbsa.idToPickerState) - m := wbsa.idToPickerState - var readyN, connectingN int - readyPickerWithWeights := make([]weightedPickerState, 0, len(m)) - for _, ps := range m { - switch ps.stateToAggregate { - case connectivity.Ready: - readyN++ - readyPickerWithWeights = append(readyPickerWithWeights, *ps) - case connectivity.Connecting: - connectingN++ - } - } - var aggregatedState connectivity.State - switch { - case readyN > 0: - aggregatedState = connectivity.Ready - case connectingN > 0: - aggregatedState = connectivity.Connecting - default: - aggregatedState = connectivity.TransientFailure - } // Make sure picker's return error is consistent with the aggregatedState. - var picker balancer.Picker - switch aggregatedState { - case connectivity.TransientFailure: - picker = base.NewErrPicker(balancer.ErrTransientFailure) + pickers := make([]weightedPickerState, 0, len(wbsa.idToPickerState)) + + switch aggState := wbsa.csEvltr.CurrentState(); aggState { case connectivity.Connecting: - picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) + return balancer.State{ + ConnectivityState: aggState, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)} + case connectivity.TransientFailure: + // this means that all sub-balancers are now in TransientFailure. 
+ for _, ps := range wbsa.idToPickerState { + pickers = append(pickers, *ps) + } + return balancer.State{ + ConnectivityState: aggState, + Picker: newWeightedPickerGroup(pickers, wbsa.newWRR)} default: - picker = newWeightedPickerGroup(readyPickerWithWeights, wbsa.newWRR) + for _, ps := range wbsa.idToPickerState { + if ps.stateToAggregate == connectivity.Ready { + pickers = append(pickers, *ps) + } + } + return balancer.State{ + ConnectivityState: aggState, + Picker: newWeightedPickerGroup(pickers, wbsa.newWRR)} } - return balancer.State{ConnectivityState: aggregatedState, Picker: picker} + } type weightedPickerGroup struct { diff --git a/xds/internal/balancer/weightedtarget/weightedtarget.go b/balancer/weightedtarget/weightedtarget.go similarity index 50% rename from xds/internal/balancer/weightedtarget/weightedtarget.go rename to balancer/weightedtarget/weightedtarget.go index 02b199258cd2..3d5acdab6afe 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget.go +++ b/balancer/weightedtarget/weightedtarget.go @@ -17,6 +17,8 @@ */ // Package weightedtarget implements the weighted_target balancer. +// +// All APIs in this package are experimental. package weightedtarget import ( @@ -24,80 +26,76 @@ import ( "fmt" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget/weightedaggregator" + "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" ) -const weightedTargetName = "weighted_target_experimental" +// Name is the name of the weighted_target balancer. 
+const Name = "weighted_target_experimental" -// newRandomWRR is the WRR constructor used to pick sub-pickers from +// NewRandomWRR is the WRR constructor used to pick sub-pickers from // sub-balancers. It's to be modified in tests. -var newRandomWRR = wrr.NewRandom +var NewRandomWRR = wrr.NewRandom func init() { - balancer.Register(&weightedTargetBB{}) + balancer.Register(bb{}) } -type weightedTargetBB struct{} +type bb struct{} -func (wt *weightedTargetBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &weightedTargetBalancer{} b.logger = prefixLogger(b) - b.stateAggregator = weightedaggregator.New(cc, b.logger, newRandomWRR) + b.stateAggregator = weightedaggregator.New(cc, b.logger, NewRandomWRR) b.stateAggregator.Start() - b.bg = balancergroup.New(cc, bOpts, b.stateAggregator, nil, b.logger) + b.bg = balancergroup.New(cc, bOpts, b.stateAggregator, b.logger) b.bg.Start() b.logger.Infof("Created") return b } -func (wt *weightedTargetBB) Name() string { - return weightedTargetName +func (bb) Name() string { + return Name } -func (wt *weightedTargetBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { return parseConfig(c) } type weightedTargetBalancer struct { logger *grpclog.PrefixLogger - // TODO: Make this package not dependent on any xds specific code. - // BalancerGroup uses xdsinternal.LocalityID as the key in the map of child - // policies that it maintains and reports load using LRS. Once these two - // dependencies are removed from the balancerGroup, this package will not - // have any dependencies on xds code. 
bg *balancergroup.BalancerGroup stateAggregator *weightedaggregator.Aggregator - targets map[string]target + targets map[string]Target } // UpdateClientConnState takes the new targets in balancer group, -// creates/deletes sub-balancers and sends them update. Addresses are split into +// creates/deletes sub-balancers and sends them update. addresses are split into // groups based on hierarchy path. -func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - newConfig, ok := s.BalancerConfig.(*lbConfig) +func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } addressesSplit := hierarchy.Group(s.ResolverState.Addresses) - var rebuildStateAndPicker bool + b.stateAggregator.PauseStateUpdates() + defer b.stateAggregator.ResumeStateUpdates() // Remove sub-pickers and sub-balancers that are not in the new config. - for name := range w.targets { + for name := range b.targets { if _, ok := newConfig.Targets[name]; !ok { - w.stateAggregator.Remove(name) - w.bg.Remove(name) - // Trigger a state/picker update, because we don't want `ClientConn` - // to pick this sub-balancer anymore. - rebuildStateAndPicker = true + b.stateAggregator.Remove(name) + b.bg.Remove(name) } } @@ -107,29 +105,33 @@ func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat // // For all sub-balancers, forward the address/balancer config update. for name, newT := range newConfig.Targets { - oldT, ok := w.targets[name] + oldT, ok := b.targets[name] if !ok { // If this is a new sub-balancer, add weights to the picker map. - w.stateAggregator.Add(name, newT.Weight) + b.stateAggregator.Add(name, newT.Weight) // Then add to the balancer group. 
- w.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) // Not trigger a state/picker update. Wait for the new sub-balancer // to send its updates. + } else if newT.ChildPolicy.Name != oldT.ChildPolicy.Name { + // If the child policy name is different, remove from balancer group + // and re-add. + b.stateAggregator.Remove(name) + b.bg.Remove(name) + b.stateAggregator.Add(name, newT.Weight) + b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) } else if newT.Weight != oldT.Weight { // If this is an existing sub-balancer, update weight if necessary. - w.stateAggregator.UpdateWeight(name, newT.Weight) - // Trigger a state/picker update, because we don't want `ClientConn` - // should do picks with the new weights now. - rebuildStateAndPicker = true + b.stateAggregator.UpdateWeight(name, newT.Weight) } // Forwards all the update: - // - Addresses are from the map after splitting with hierarchy path, + // - addresses are from the map after splitting with hierarchy path, // - Top level service config and attributes are the same, // - Balancer config comes from the targets map. // // TODO: handle error? How to aggregate errors and return? - _ = w.bg.UpdateClientConnState(name, balancer.ClientConnState{ + _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, @@ -139,23 +141,36 @@ func (w *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat }) } - w.targets = newConfig.Targets - - if rebuildStateAndPicker { - w.stateAggregator.BuildAndUpdate() + b.targets = newConfig.Targets + + // If the targets length is zero, it means we have removed all child + // policies from the balancer group and aggregator. + // At the start of this UpdateClientConnState() operation, a call to + // b.stateAggregator.ResumeStateUpdates() is deferred. 
Thus, setting the + // needUpdateStateOnResume bool to true here will ensure a new picker is + // built as part of that deferred function. Since there are now no child + // policies, the aggregated connectivity state reported form the Aggregator + // will be TRANSIENT_FAILURE. + if len(b.targets) == 0 { + b.stateAggregator.NeedUpdateStateOnResume() } + return nil } -func (w *weightedTargetBalancer) ResolverError(err error) { - w.bg.ResolverError(err) +func (b *weightedTargetBalancer) ResolverError(err error) { + b.bg.ResolverError(err) +} + +func (b *weightedTargetBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.bg.UpdateSubConnState(sc, state) } -func (w *weightedTargetBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - w.bg.UpdateSubConnState(sc, state) +func (b *weightedTargetBalancer) Close() { + b.stateAggregator.Stop() + b.bg.Close() } -func (w *weightedTargetBalancer) Close() { - w.stateAggregator.Stop() - w.bg.Close() +func (b *weightedTargetBalancer) ExitIdle() { + b.bg.ExitIdle() } diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_config.go b/balancer/weightedtarget/weightedtarget_config.go similarity index 64% rename from xds/internal/balancer/weightedtarget/weightedtarget_config.go rename to balancer/weightedtarget/weightedtarget_config.go index 747ce918bc68..52090cd67b02 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_config.go +++ b/balancer/weightedtarget/weightedtarget_config.go @@ -25,30 +25,23 @@ import ( "google.golang.org/grpc/serviceconfig" ) -type target struct { +// Target represents one target with the weight and the child policy. +type Target struct { // Weight is the weight of the child policy. - Weight uint32 + Weight uint32 `json:"weight,omitempty"` // ChildPolicy is the child policy and it's config. 
- ChildPolicy *internalserviceconfig.BalancerConfig + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } -// lbConfig is the balancer config for weighted_target. The proto representation -// is: -// -// message WeightedTargetConfig { -// message Target { -// uint32 weight = 1; -// repeated LoadBalancingConfig child_policy = 2; -// } -// map targets = 1; -// } -type lbConfig struct { - serviceconfig.LoadBalancingConfig - Targets map[string]target +// LBConfig is the balancer config for weighted_target. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + Targets map[string]Target `json:"targets,omitempty"` } -func parseConfig(c json.RawMessage) (*lbConfig, error) { - var cfg lbConfig +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, err } diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go b/balancer/weightedtarget/weightedtarget_config_test.go similarity index 65% rename from xds/internal/balancer/weightedtarget/weightedtarget_config_test.go rename to balancer/weightedtarget/weightedtarget_config_test.go index 2208117f60e1..25bbee836abe 100644 --- a/xds/internal/balancer/weightedtarget/weightedtarget_config_test.go +++ b/balancer/weightedtarget/weightedtarget_config_test.go @@ -23,40 +23,42 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/grpclb" + "google.golang.org/grpc/balancer/roundrobin" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" ) const ( testJSONConfig = `{ "targets": { - "cluster_1" : { - "weight":75, - "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] + "cluster_1": { + "weight": 75, + "childPolicy": [{ + "grpclb": { + "childPolicy": [{"pick_first":{}}], + "targetName": "foo-service" + } + }] }, - "cluster_2" : { 
- "weight":25, - "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] + "cluster_2": { + "weight": 25, + "childPolicy": [{"round_robin": ""}] } } }` - - cdsName = "cds_experimental" ) var ( - cdsConfigParser = balancer.Get(cdsName).(balancer.ConfigParser) - cdsConfigJSON1 = `{"cluster":"cluster_1"}` - cdsConfig1, _ = cdsConfigParser.ParseConfig([]byte(cdsConfigJSON1)) - cdsConfigJSON2 = `{"cluster":"cluster_2"}` - cdsConfig2, _ = cdsConfigParser.ParseConfig([]byte(cdsConfigJSON2)) + grpclbConfigParser = balancer.Get("grpclb").(balancer.ConfigParser) + grpclbConfigJSON = `{"childPolicy": [{"pick_first":{}}], "targetName": "foo-service"}` + grpclbConfig, _ = grpclbConfigParser.ParseConfig([]byte(grpclbConfigJSON)) ) -func Test_parseConfig(t *testing.T) { +func (s) TestParseConfig(t *testing.T) { tests := []struct { name string js string - want *lbConfig + want *LBConfig wantErr bool }{ { @@ -68,20 +70,19 @@ func Test_parseConfig(t *testing.T) { { name: "OK", js: testJSONConfig, - want: &lbConfig{ - Targets: map[string]target{ + want: &LBConfig{ + Targets: map[string]Target{ "cluster_1": { Weight: 75, ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: cdsName, - Config: cdsConfig1, + Name: "grpclb", + Config: grpclbConfig, }, }, "cluster_2": { Weight: 25, ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: cdsName, - Config: cdsConfig2, + Name: roundrobin.Name, }, }, }, @@ -93,8 +94,7 @@ func Test_parseConfig(t *testing.T) { t.Run(tt.name, func(t *testing.T) { got, err := parseConfig([]byte(tt.js)) if (err != nil) != tt.wantErr { - t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) - return + t.Fatalf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) } if !cmp.Equal(got, tt.want) { t.Errorf("parseConfig() got unexpected result, diff: %v", cmp.Diff(got, tt.want)) diff --git a/balancer/weightedtarget/weightedtarget_test.go b/balancer/weightedtarget/weightedtarget_test.go new file mode 100644 index 000000000000..5658f302a49b 
--- /dev/null +++ b/balancer/weightedtarget/weightedtarget_test.go @@ -0,0 +1,1364 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package weightedtarget + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +const ( + defaultTestTimeout = 5 * time.Second +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +type testConfigBalancerBuilder struct { + balancer.Builder +} + +func newTestConfigBalancerBuilder() *testConfigBalancerBuilder { + return &testConfigBalancerBuilder{ + Builder: balancer.Get(roundrobin.Name), + } +} + +// pickAndCheckError returns a function which takes a picker, invokes the Pick() method +// multiple times and ensures that the error returned by the picker matches the provided error. 
+func pickAndCheckError(want error) func(balancer.Picker) error { + const rpcCount = 5 + return func(p balancer.Picker) error { + for i := 0; i < rpcCount; i++ { + if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), want.Error()) { + return fmt.Errorf("picker.Pick() returned error: %v, want: %v", err, want) + } + } + return nil + } +} + +func (t *testConfigBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + rr := t.Builder.Build(cc, opts) + return &testConfigBalancer{ + Balancer: rr, + } +} + +const testConfigBalancerName = "test_config_balancer" + +func (t *testConfigBalancerBuilder) Name() string { + return testConfigBalancerName +} + +type stringBalancerConfig struct { + serviceconfig.LoadBalancingConfig + configStr string +} + +func (t *testConfigBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg string + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, fmt.Errorf("failed to unmarshal config in %q: %v", testConfigBalancerName, err) + } + return stringBalancerConfig{configStr: cfg}, nil +} + +// testConfigBalancer is a roundrobin balancer, but it takes the balancer config +// string and adds it as an address attribute to the backend addresses. +type testConfigBalancer struct { + balancer.Balancer +} + +// configKey is the type used as the key to store balancer config in the +// Attributes field of resolver.Address. 
+type configKey struct{} + +func setConfigKey(addr resolver.Address, config string) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(configKey{}, config) + return addr +} + +func getConfigKey(attr *attributes.Attributes) (string, bool) { + v := attr.Value(configKey{}) + name, ok := v.(string) + return name, ok +} + +func (b *testConfigBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + c, ok := s.BalancerConfig.(stringBalancerConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type %T", s.BalancerConfig) + } + + addrsWithAttr := make([]resolver.Address, len(s.ResolverState.Addresses)) + for i, addr := range s.ResolverState.Addresses { + addrsWithAttr[i] = setConfigKey(addr, c.configStr) + } + s.BalancerConfig = nil + s.ResolverState.Addresses = addrsWithAttr + return b.Balancer.UpdateClientConnState(s) +} + +func (b *testConfigBalancer) Close() { + b.Balancer.Close() +} + +var ( + wtbBuilder balancer.Builder + wtbParser balancer.ConfigParser + testBackendAddrStrs []string +) + +const testBackendAddrsCount = 12 + +func init() { + balancer.Register(newTestConfigBalancerBuilder()) + for i := 0; i < testBackendAddrsCount; i++ { + testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)) + } + wtbBuilder = balancer.Get(Name) + wtbParser = wtbBuilder.(balancer.ConfigParser) + + balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond + NewRandomWRR = testutils.NewTestWRR +} + +// TestWeightedTarget covers the cases that a sub-balancer is added and a +// sub-balancer is removed. It verifies that the addresses and balancer configs +// are forwarded to the right sub-balancer. This test is intended to test the +// glue code in weighted_target. It also tests an empty target config update, +// which should trigger a transient failure state update. 
+func (s) TestWeightedTarget(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: round_robin". + config1, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"round_robin": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1], Attributes: nil} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr1, []string{"cluster_1"})}}, + BalancerConfig: config1, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + verifyAddressInNewSubConn(t, cc, addr1) + + // Send subconn state change. + sc1 := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test pick with one backend. + for i := 0; i < 5; i++ { + gotSCSt, _ := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) + } + } + + // Remove cluster_1, and add "cluster_2: test_config_balancer". The + // test_config_balancer adds an address attribute whose value is set to the + // config that is passed to it. + config2, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_2": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and one address with hierarchy path "cluster_2". 
+ addr2 := resolver.Address{Addr: testBackendAddrStrs[2], Attributes: nil} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr2, []string{"cluster_2"})}}, + BalancerConfig: config2, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Expect a new subConn from the test_config_balancer which has an address + // attribute set to the config that was passed to it. + verifyAddressInNewSubConn(t, cc, setConfigKey(addr2, "cluster_2")) + + // The subconn for cluster_1 should be removed. + scRemoved := <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved) + } + wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + + sc2 := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p = <-cc.NewPickerCh + + // Test pick with one backend. + for i := 0; i < 5; i++ { + gotSCSt, _ := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) + } + } + + // Replace child policy of "cluster_1" to "round_robin". + config3, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_2": { + "weight":1, + "childPolicy": [{"round_robin": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_2"]. 
+ addr3 := resolver.Address{Addr: testBackendAddrStrs[3], Attributes: nil} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr3, []string{"cluster_2"})}}, + BalancerConfig: config3, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + verifyAddressInNewSubConn(t, cc, addr3) + + // The subconn from the test_config_balancer should be removed. + scRemoved = <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved) + } + wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + + // Send subconn state change. + sc3 := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p = <-cc.NewPickerCh + + // Test pick with one backend. + for i := 0; i < 5; i++ { + gotSCSt, _ := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc3) + } + } + // Update the Weighted Target Balancer with an empty address list and no + // targets. This should cause a Transient Failure State update to the Client + // Conn. 
+ emptyConfig, err := wtbParser.ParseConfig([]byte(`{}`)) + if err != nil { + t.Fatalf("Failed to parse balancer config: %v", err) + } + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{}, + BalancerConfig: emptyConfig, + }); err != nil { + t.Fatalf("Failed to update ClientConn state: %v", err) + } + + state := <-cc.NewStateCh + if state != connectivity.TransientFailure { + t.Fatalf("Empty target update should have triggered a TF state update, got: %v", state) + } +} + +// TestWeightedTarget_OneSubBalancer_AddRemoveBackend tests the case where we +// have a weighted target balancer will one sub-balancer, and we add and remove +// backends from the subBalancer. +func (s) TestWeightedTarget_OneSubBalancer_AddRemoveBackend(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: round_robin". + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"round_robin": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr1, []string{"cluster_1"})}}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + verifyAddressInNewSubConn(t, cc, addr1) + + // Expect one SubConn, and move it to READY. + sc1 := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test pick with one backend. 
+ for i := 0; i < 5; i++ { + gotSCSt, _ := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) + } + } + + // Send two addresses. + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + verifyAddressInNewSubConn(t, cc, addr2) + + // Expect one new SubConn, and move it to READY. + sc2 := <-cc.NewSubConnCh + // Update the SubConn to become READY. + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p = <-cc.NewPickerCh + + // Test round robin pick. + want := []balancer.SubConn{sc1, sc2} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Remove the first address. + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr2, []string{"cluster_1"})}}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Expect one SubConn to be removed. + scRemoved := <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved) + } + wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + p = <-cc.NewPickerCh + + // Test pick with only the second SubConn. 
+ for i := 0; i < 5; i++ { + gotSC, _ := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSC.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSC, sc2) + } + } +} + +// TestWeightedTarget_TwoSubBalancers_OneBackend tests the case where we have a +// weighted target balancer with two sub-balancers, each with one backend. +func (s) TestWeightedTarget_TwoSubBalancers_OneBackend(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer". + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with one address for each cluster. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 2) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1}, + "cluster_2": {addr2}, + }) + + // We expect a single subConn on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_2"][0].sc + + // Send state changes for both SubConns, and wait for the picker. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test roundrobin on the last picker. + want := []balancer.SubConn{sc1, sc2} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } +} + +// TestWeightedTarget_TwoSubBalancers_MoreBackends tests the case where we have +// a weighted target balancer with two sub-balancers, each with more than one +// backend. +func (s) TestWeightedTarget_TwoSubBalancers_MoreBackends(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: round_robin, cluster_2: round_robin". + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with two backends for each cluster. 
+ addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} + addr4 := resolver.Address{Addr: testBackendAddrStrs[4]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + hierarchy.Set(addr3, []string{"cluster_2"}), + hierarchy.Set(addr4, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 4) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1, addr2}, + "cluster_2": {addr3, addr4}, + }) + + // We expect two subConns on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_1"][1].sc + sc3 := scs["cluster_2"][0].sc + sc4 := scs["cluster_2"][1].sc + + // Send state changes for all SubConns, and wait for the picker. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test roundrobin on the last picker. RPCs should be sent equally to all + // backends. + want := []balancer.SubConn{sc1, sc2, sc3, sc4} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Turn sc2's connection down, should be RR between balancers. + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + p = <-cc.NewPickerCh + want = []balancer.SubConn{sc1, sc1, sc3, sc4} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Remove subConn corresponding to addr3. 
+ if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + hierarchy.Set(addr4, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + scRemoved := <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc3, scRemoved) + } + wtb.UpdateSubConnState(scRemoved, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + p = <-cc.NewPickerCh + want = []balancer.SubConn{sc1, sc4} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Turn sc1's connection down. + wantSubConnErr := errors.New("subConn connection error") + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: wantSubConnErr, + }) + p = <-cc.NewPickerCh + want = []balancer.SubConn{sc4} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Turn last connection to connecting. + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + p = <-cc.NewPickerCh + for i := 0; i < 5; i++ { + if _, err := p.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) + } + } + + // Turn all connections down. 
+ wtb.UpdateSubConnState(sc4, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: wantSubConnErr, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForPicker(ctx, pickAndCheckError(wantSubConnErr)); err != nil { + t.Fatal(err) + } +} + +// TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends tests the +// case where we have a weighted target balancer with two sub-balancers of +// differing weights. +func (s) TestWeightedTarget_TwoSubBalancers_DifferentWeight_MoreBackends(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with two subBalancers, one with twice the weight of the other. + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight": 2, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with two backends for each cluster. 
+ addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} + addr4 := resolver.Address{Addr: testBackendAddrStrs[4]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + hierarchy.Set(addr3, []string{"cluster_2"}), + hierarchy.Set(addr4, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 4) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1, addr2}, + "cluster_2": {addr3, addr4}, + }) + + // We expect two subConns on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_1"][1].sc + sc3 := scs["cluster_2"][0].sc + sc4 := scs["cluster_2"][1].sc + + // Send state changes for all SubConns, and wait for the picker. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test roundrobin on the last picker. Twice the number of RPCs should be + // sent to cluster_1 when compared to cluster_2. + want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } +} + +// TestWeightedTarget_ThreeSubBalancers_RemoveBalancer tests the case where we +// have a weighted target balancer with three sub-balancers and we remove one of +// the subBalancers. +func (s) TestWeightedTarget_ThreeSubBalancers_RemoveBalancer(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with two subBalancers, one with twice the weight of the other. 
+ config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + }, + "cluster_3": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_3"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with one backend for each cluster. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_2"}), + hierarchy.Set(addr3, []string{"cluster_3"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 3) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1}, + "cluster_2": {addr2}, + "cluster_3": {addr3}, + }) + + // We expect one subConn on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_2"][0].sc + sc3 := scs["cluster_3"][0].sc + + // Send state changes for all SubConns, and wait for the picker. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + want := []balancer.SubConn{sc1, sc2, sc3} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Remove the second balancer, while the others two are ready. + config, err = wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_3": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_3"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr3, []string{"cluster_3"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Removing a subBalancer causes the weighted target LB policy to push a new + // picker which ensures that the removed subBalancer is not picked for RPCs. 
+ p = <-cc.NewPickerCh + + scRemoved := <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scRemoved) + } + want = []balancer.SubConn{sc1, sc3} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Move balancer 3 into transient failure. + wantSubConnErr := errors.New("subConn connection error") + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: wantSubConnErr, + }) + <-cc.NewPickerCh + + // Remove the first balancer, while the third is transient failure. + config, err = wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_3": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_3"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr3, []string{"cluster_3"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Removing a subBalancer causes the weighted target LB policy to push a new + // picker which ensures that the removed subBalancer is not picked for RPCs. + + scRemoved = <-cc.RemoveSubConnCh + if !cmp.Equal(scRemoved, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scRemoved) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := cc.WaitForPicker(ctx, pickAndCheckError(wantSubConnErr)); err != nil { + t.Fatal(err) + } +} + +// TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends tests the case +// where we have a weighted target balancer with two sub-balancers, and we +// change the weight of these subBalancers. 
+func (s) TestWeightedTarget_TwoSubBalancers_ChangeWeight_MoreBackends(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with two subBalancers, one with twice the weight of the other. + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight": 2, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with two backends for each cluster. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + addr3 := resolver.Address{Addr: testBackendAddrStrs[3]} + addr4 := resolver.Address{Addr: testBackendAddrStrs[4]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + hierarchy.Set(addr3, []string{"cluster_2"}), + hierarchy.Set(addr4, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 4) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1, addr2}, + "cluster_2": {addr3, addr4}, + }) + + // We expect two subConns on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_1"][1].sc + sc3 := scs["cluster_2"][0].sc + sc4 := scs["cluster_2"][1].sc + + // Send state changes for all SubConns, and wait for the picker. 
+ wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p := <-cc.NewPickerCh + + // Test roundrobin on the last picker. Twice the number of RPCs should be + // sent to cluster_1 when compared to cluster_2. + want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // Change the weight of cluster_1. 
+ config, err = wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight": 3, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight": 1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_1"}), + hierarchy.Set(addr3, []string{"cluster_2"}), + hierarchy.Set(addr4, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Weight change causes a new picker to be pushed to the channel. + p = <-cc.NewPickerCh + want = []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc2, sc3, sc4} + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } +} + +// TestWeightedTarget_InitOneSubBalancerTransientFailure tests that at init +// time, with two sub-balancers, if one sub-balancer reports transient_failure, +// the picks won't fail with transient_failure, and should instead wait for the +// other sub-balancer. +func (s) TestWeightedTarget_InitOneSubBalancerTransientFailure(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer". 
+ config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with one address for each cluster. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 2) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1}, + "cluster_2": {addr2}, + }) + + // We expect a single subConn on each subBalancer. + sc1 := scs["cluster_1"][0].sc + _ = scs["cluster_2"][0].sc + + // Set one subconn to TransientFailure, this will trigger one sub-balancer + // to report transient failure. + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + + p := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + r, err := p.Pick(balancer.PickInfo{}) + if err != balancer.ErrNoSubConnAvailable { + t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrNoSubConnAvailable, r, err) + } + } +} + +// Test that with two sub-balancers, both in transient_failure, if one turns +// connecting, the overall state stays in transient_failure, and all picks +// return transient failure error. 
+func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + // Start with "cluster_1: test_config_balancer, cluster_2: test_config_balancer". + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_1"}] + }, + "cluster_2": { + "weight":1, + "childPolicy": [{"test_config_balancer": "cluster_2"}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config with one address for each cluster. + addr1 := resolver.Address{Addr: testBackendAddrStrs[1]} + addr2 := resolver.Address{Addr: testBackendAddrStrs[2]} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(addr1, []string{"cluster_1"}), + hierarchy.Set(addr2, []string{"cluster_2"}), + }}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + scs := waitForNewSubConns(t, cc, 2) + verifySubConnAddrs(t, scs, map[string][]resolver.Address{ + "cluster_1": {addr1}, + "cluster_2": {addr2}, + }) + + // We expect a single subConn on each subBalancer. + sc1 := scs["cluster_1"][0].sc + sc2 := scs["cluster_2"][0].sc + + // Set both subconn to TransientFailure, this will put both sub-balancers in + // transient failure. 
+ wantSubConnErr := errors.New("subConn connection error") + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: wantSubConnErr, + }) + <-cc.NewPickerCh + wtb.UpdateSubConnState(sc2, balancer.SubConnState{ + ConnectivityState: connectivity.TransientFailure, + ConnectionError: wantSubConnErr, + }) + p := <-cc.NewPickerCh + + for i := 0; i < 5; i++ { + if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), wantSubConnErr.Error()) { + t.Fatalf("picker.Pick() returned error: %v, want: %v", err, wantSubConnErr) + } + } + + // Set one subconn to Connecting, it shouldn't change the overall state. + wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + select { + case <-time.After(100 * time.Millisecond): + case <-cc.NewPickerCh: + t.Fatal("received new picker from the LB policy when expecting none") + } + + for i := 0; i < 5; i++ { + if _, err := p.Pick(balancer.PickInfo{}); err == nil || !strings.Contains(err.Error(), wantSubConnErr.Error()) { + t.Fatalf("picker.Pick() returned error: %v, want: %v", err, wantSubConnErr) + } + } +} + +// Verify that a SubConn is created with the expected address and hierarchy +// path cleared. +func verifyAddressInNewSubConn(t *testing.T, cc *testutils.TestClientConn, addr resolver.Address) { + t.Helper() + + gotAddr := <-cc.NewSubConnAddrsCh + wantAddr := []resolver.Address{hierarchy.Set(addr, []string{})} + if diff := cmp.Diff(gotAddr, wantAddr, cmp.AllowUnexported(attributes.Attributes{})); diff != "" { + t.Fatalf("got unexpected new subconn addrs: %v", diff) + } +} + +// subConnWithAddr wraps a subConn and the address for which it was created. +type subConnWithAddr struct { + sc balancer.SubConn + addr resolver.Address +} + +// waitForNewSubConns waits for `num` number of subConns to be created. 
This is +// expected to be used from tests using the "test_config_balancer" LB policy, +// which adds an address attribute with value set to the balancer config. +// +// Returned value is a map from subBalancer (identified by its config) to +// subConns created by it. +func waitForNewSubConns(t *testing.T, cc *testutils.TestClientConn, num int) map[string][]subConnWithAddr { + t.Helper() + + scs := make(map[string][]subConnWithAddr) + for i := 0; i < num; i++ { + addrs := <-cc.NewSubConnAddrsCh + if len(addrs) != 1 { + t.Fatalf("received subConns with %d addresses, want 1", len(addrs)) + } + cfg, ok := getConfigKey(addrs[0].Attributes) + if !ok { + t.Fatalf("received subConn address %v contains no attribute for balancer config", addrs[0]) + } + sc := <-cc.NewSubConnCh + scWithAddr := subConnWithAddr{sc: sc, addr: addrs[0]} + scs[cfg] = append(scs[cfg], scWithAddr) + } + return scs +} + +func verifySubConnAddrs(t *testing.T, scs map[string][]subConnWithAddr, wantSubConnAddrs map[string][]resolver.Address) { + t.Helper() + + if len(scs) != len(wantSubConnAddrs) { + t.Fatalf("got new subConns %+v, want %v", scs, wantSubConnAddrs) + } + for cfg, scsWithAddr := range scs { + if len(scsWithAddr) != len(wantSubConnAddrs[cfg]) { + t.Fatalf("got new subConns %+v, want %v", scs, wantSubConnAddrs) + } + wantAddrs := wantSubConnAddrs[cfg] + for i, scWithAddr := range scsWithAddr { + if diff := cmp.Diff(wantAddrs[i].Addr, scWithAddr.addr.Addr); diff != "" { + t.Fatalf("got unexpected new subconn addrs: %v", diff) + } + } + } +} + +const initIdleBalancerName = "test-init-Idle-balancer" + +var errTestInitIdle = fmt.Errorf("init Idle balancer error 0") + +func init() { + stub.Register(initIdleBalancerName, stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error { + bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{}) + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc 
balancer.SubConn, state balancer.SubConnState) { + err := fmt.Errorf("wrong picker error") + if state.ConnectivityState == connectivity.Idle { + err = errTestInitIdle + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &testutils.TestConstPicker{Err: err}, + }) + }, + }) +} + +// TestInitialIdle covers the case that if the child reports Idle, the overall +// state will be Idle. +func (s) TestInitialIdle(t *testing.T) { + cc := testutils.NewTestClientConn(t) + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"test-init-Idle-balancer": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + addrs := []resolver.Address{{Addr: testBackendAddrStrs[0], Attributes: nil}} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addrs[0], []string{"cds:cluster_1"})}}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Verify that a subconn is created with the address, and the hierarchy path + // in the address is cleared. + for range addrs { + sc := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + } + + if state := <-cc.NewStateCh; state != connectivity.Idle { + t.Fatalf("Received aggregated state: %v, want Idle", state) + } +} + +// TestIgnoreSubBalancerStateTransitions covers the case that if the child reports a +// transition from TF to Connecting, the overall state will still be TF. 
+func (s) TestIgnoreSubBalancerStateTransitions(t *testing.T) { + cc := &tcc{TestClientConn: testutils.NewTestClientConn(t)} + + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"round_robin": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + addr := resolver.Address{Addr: testBackendAddrStrs[0], Attributes: nil} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addr, []string{"cluster_1"})}}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + sc := <-cc.NewSubConnCh + wtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + wtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + + // Verify that the SubConnState update from TF to Connecting is ignored. + if len(cc.states) != 2 || cc.states[0].ConnectivityState != connectivity.Connecting || cc.states[1].ConnectivityState != connectivity.TransientFailure { + t.Fatalf("cc.states = %v; want [Connecting, TransientFailure]", cc.states) + } +} + +// tcc wraps a testutils.TestClientConn but stores all state transitions in a +// slice. 
+type tcc struct { + *testutils.TestClientConn + states []balancer.State +} + +func (t *tcc) UpdateState(bs balancer.State) { + t.states = append(t.states, bs) + t.TestClientConn.UpdateState(bs) +} + +func (s) TestUpdateStatePauses(t *testing.T) { + cc := &tcc{TestClientConn: testutils.NewTestClientConn(t)} + + balFuncs := stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, s balancer.ClientConnState) error { + bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: nil}) + bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: nil}) + return nil + }, + } + stub.Register("update_state_balancer", balFuncs) + + wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) + defer wtb.Close() + + config, err := wtbParser.ParseConfig([]byte(` +{ + "targets": { + "cluster_1": { + "weight":1, + "childPolicy": [{"update_state_balancer": ""}] + } + } +}`)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + addrs := []resolver.Address{{Addr: testBackendAddrStrs[0], Attributes: nil}} + if err := wtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{hierarchy.Set(addrs[0], []string{"cds:cluster_1"})}}, + BalancerConfig: config, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Verify that the only state update is the second one called by the child. 
+ if len(cc.states) != 1 || cc.states[0].ConnectivityState != connectivity.Ready { + t.Fatalf("cc.states = %v; want [connectivity.Ready]", cc.states) + } +} diff --git a/balancer_conn_wrappers.go b/balancer_conn_wrappers.go index 41061d6d3dc5..04b9ad411691 100644 --- a/balancer_conn_wrappers.go +++ b/balancer_conn_wrappers.go @@ -19,151 +19,331 @@ package grpc import ( + "context" "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} +type ccbMode int + +const ( + ccbModeActive = iota + ccbModeIdle + ccbModeClosed + ccbModeExitingIdle +) -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. 
type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - scBuffer *buffer.Unbounded - done *grpcsync.Event + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + + // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled in the serializer. Fields + // accessed *only* in these serializer callbacks, can therefore be accessed + // without a mutex. + balancer *gracefulswitch.Balancer + curBalancerName string - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + // mu guards access to the below fields. Access to the serializer and its + // cancel function needs to be mutex protected because they are overwritten + // when the wrapper exits idle mode. + mu sync.Mutex + serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. + serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. + mode ccbMode // Tracks the current mode of the wrapper. } -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. 
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ - cc: cc, - scBuffer: buffer.NewUnbounded(), - done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), + cc: cc, + opts: bopts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. -func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case t := <-ccb.scBuffer.Get(): - ccb.scBuffer.Load() - if ccb.done.HasFired() { - break +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.mu.Lock() + errCh := make(chan error, 1) + // Here and everywhere else where Schedule() is called, it is done with the + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { + // If the addresses specified in the update contain addresses of type + // "grpclb" and the selected LB policy is not "grpclb", these addresses + // will be filtered out and ccs will be modified with the updated + // address list. 
+ if ccb.curBalancerName != grpclbName { + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) } - ccb.balancerMu.Lock() - su := t.(*scStateUpdate) - ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) - ccb.balancerMu.Unlock() - case <-ccb.done.Done(): + ccs.ResolverState.Addresses = addrs } + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. + ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") + } + ccb.mu.Unlock() - if ccb.done.HasFired() { - ccb.balancer.Close() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. + err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + return err +} + +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. 
+func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) + ccb.mu.Unlock() +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) + ccb.mu.Unlock() +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { return } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() +} + +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. 
+func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() } func (ccb *ccBalancerWrapper) close() { - ccb.done.Fire() -} - -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - if sc == nil { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.closeBalancer(ccbModeClosed) +} + +// enterIdleMode is invoked by grpc when the channel enters idle mode upon +// expiry of idle_timeout. This call blocks until the balancer is closed. +func (ccb *ccBalancerWrapper) enterIdleMode() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") + ccb.closeBalancer(ccbModeIdle) +} + +// closeBalancer is invoked when the channel is being closed or when it enters +// idle mode upon expiry of idle_timeout. 
+func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { + ccb.mu.Unlock() return } - ccb.scBuffer.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, + + ccb.mode = m + done := ccb.serializer.Done + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent + // to the balancer. + ccb.serializerCancel() + // Empty the current balancer name because we don't have a balancer + // anymore and also so that we act on the next call to switchTo by + // creating a new balancer specified by the new resolver. + ccb.curBalancerName = "" }) + if !ok { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-done + // Spawn a goroutine to close the balancer (since it may block trying to + // cleanup all allocated resources) and return early. + go b.Close() } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// exitIdleMode is invoked by grpc when the channel exits idle mode either +// because of an RPC or because of an invocation of the Connect() API. This +// recreates the balancer that was closed previously when entering idle mode. +// +// If the channel is not in idle mode, we know for a fact that we are here as a +// result of the user calling the Connect() method on the ClientConn. In this +// case, we can simply forward the call to the underlying balancer, instructing +// it to reconnect to the backends. +func (ccb *ccBalancerWrapper) exitIdleMode() { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed { + // Request to exit idle is a no-op when wrapper is already closed. + ccb.mu.Unlock() + return + } + + if ccb.mode == ccbModeIdle { + // Recreate the serializer which was closed when we entered idle. 
+ ctx, cancel := context.WithCancel(context.Background()) + ccb.serializer = grpcsync.NewCallbackSerializer(ctx) + ccb.serializerCancel = cancel + } + + // The ClientConn guarantees that mutual exclusion between close() and + // exitIdleMode(), and since we just created a new serializer, we can be + // sure that the below function will be scheduled. + done := make(chan struct{}) + ccb.serializer.Schedule(func(_ context.Context) { + defer close(done) + + ccb.mu.Lock() + defer ccb.mu.Unlock() + + if ccb.mode != ccbModeIdle { + ccb.balancer.ExitIdle() + return + } + + // Gracefulswitch balancer does not support a switchTo operation after + // being closed. Hence we need to create a new one here. + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + ccb.mode = ccbModeActive + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") + + }) + ccb.mu.Unlock() + + <-done } -func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() - ccb.balancer.ResolverError(err) - ccb.balancerMu.Unlock() +func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { + ccb.mu.Lock() + defer ccb.mu.Unlock() + return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { - return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + if ccb.isIdleOrClosed() { + return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + + if len(addrs) == 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - 
acbw := &acBalancerWrapper{ac: ac} - acbw.ac.mu.Lock() + acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} ac.acbw = acbw - acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - acbw, ok := sc.(*acBalancerWrapper) - if !ok { + if ccb.isIdleOrClosed() { + // It it safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn return } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { + + acbw, ok := sc.(*acBalancerWrapper) + if !ok { return } - delete(ccb.subConns, acbw) - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + if ccb.isIdleOrClosed() { + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -172,11 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { + if ccb.isIdleOrClosed() { return } + // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. 
If the picker is @@ -187,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + if ccb.isIdleOrClosed() { + return + } + ccb.cc.resolveNow(o) } @@ -197,58 +380,80 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn + ac *addrConn // read-only + + mu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer +} + +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.tearDown(errConnDrain) - return + acbw.ac.updateAddrs(addrs) +} + +func (acbw *acBalancerWrapper) Connect() { + go acbw.ac.connect() +} + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. - acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.tearDown(errConnDrain) - - if acState == connectivity.Shutdown { - return - } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) 
+} - ac, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = ac - ac.mu.Lock() - ac.acbw = acbw - ac.mu.Unlock() - if acState != connectivity.Idle { - ac.connect() - } +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) } -func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() - acbw.ac.connect() +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function } -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { acbw.mu.Lock() defer acbw.mu.Unlock() - return acbw.ac + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. 
+ unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) } diff --git a/balancer_conn_wrappers_test.go b/balancer_conn_wrappers_test.go deleted file mode 100644 index 935d11d1d391..000000000000 --- a/balancer_conn_wrappers_test.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "net" - "testing" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/internal/balancer/stub" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" -) - -// TestBalancerErrorResolverPolling injects balancer errors and verifies -// ResolveNow is called on the resolver with the appropriate backoff strategy -// being consulted between ResolveNow calls. -func (s) TestBalancerErrorResolverPolling(t *testing.T) { - // The test balancer will return ErrBadResolverState iff the - // ClientConnState contains no addresses. 
- bf := stub.BalancerFuncs{ - UpdateClientConnState: func(_ *stub.BalancerData, s balancer.ClientConnState) error { - if len(s.ResolverState.Addresses) == 0 { - return balancer.ErrBadResolverState - } - return nil - }, - } - const balName = "BalancerErrorResolverPolling" - stub.Register(balName, bf) - - testResolverErrorPolling(t, - func(r *manual.Resolver) { - // No addresses so the balancer will fail. - r.CC.UpdateState(resolver.State{}) - }, func(r *manual.Resolver) { - // UpdateState will block if ResolveNow is being called (which blocks on - // rn), so call it in a goroutine. Include some address so the balancer - // will be happy. - go r.CC.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "x"}}}) - }, - WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, balName))) -} - -// TestRoundRobinZeroAddressesResolverPolling reports no addresses to the round -// robin balancer and verifies ResolveNow is called on the resolver with the -// appropriate backoff strategy being consulted between ResolveNow calls. -func (s) TestRoundRobinZeroAddressesResolverPolling(t *testing.T) { - // We need to start a real server or else the connecting loop will call - // ResolveNow after every iteration, even after a valid resolver result is - // returned. - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Error while listening. Err: %v", err) - } - defer lis.Close() - s := NewServer() - defer s.Stop() - go s.Serve(lis) - - testResolverErrorPolling(t, - func(r *manual.Resolver) { - // No addresses so the balancer will fail. - r.CC.UpdateState(resolver.State{}) - }, func(r *manual.Resolver) { - // UpdateState will block if ResolveNow is being called (which - // blocks on rn), so call it in a goroutine. Include a valid - // address so the balancer will be happy. 
- go r.CC.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - }, - WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, roundrobin.Name))) -} diff --git a/balancer_switching_test.go b/balancer_switching_test.go deleted file mode 100644 index 2c6ed576620f..000000000000 --- a/balancer_switching_test.go +++ /dev/null @@ -1,540 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "fmt" - "math" - "testing" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/serviceconfig" -) - -var _ balancer.Builder = &magicalLB{} -var _ balancer.Balancer = &magicalLB{} - -// magicalLB is a ringer for grpclb. 
It is used to avoid circular dependencies on the grpclb package -type magicalLB struct{} - -func (b *magicalLB) Name() string { - return "grpclb" -} - -func (b *magicalLB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - return b -} - -func (b *magicalLB) ResolverError(error) {} - -func (b *magicalLB) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {} - -func (b *magicalLB) UpdateClientConnState(balancer.ClientConnState) error { - return nil -} - -func (b *magicalLB) Close() {} - -func init() { - balancer.Register(&magicalLB{}) -} - -func startServers(t *testing.T, numServers int, maxStreams uint32) ([]*server, func()) { - var servers []*server - for i := 0; i < numServers; i++ { - s := newTestServer() - servers = append(servers, s) - go s.start(t, 0, maxStreams) - s.wait(t, 2*time.Second) - } - return servers, func() { - for i := 0; i < numServers; i++ { - servers[i].stop() - } - } -} - -func checkPickFirst(cc *ClientConn, servers []*server) error { - var ( - req = "port" - reply string - err error - ) - connected := false - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - for i := 0; i < 5000; i++ { - if err = cc.Invoke(ctx, "/foo/bar", &req, &reply); errorDesc(err) == servers[0].port { - if connected { - // connected is set to false if peer is not server[0]. So if - // connected is true here, this is the second time we saw - // server[0] in a row. Break because pickfirst is in effect. - break - } - connected = true - } else { - connected = false - } - time.Sleep(time.Millisecond) - } - if !connected { - return fmt.Errorf("pickfirst is not in effect after 5 second, EmptyCall() = _, %v, want _, %v", err, servers[0].port) - } - - // The following RPCs should all succeed with the first server. 
- for i := 0; i < 3; i++ { - err = cc.Invoke(ctx, "/foo/bar", &req, &reply) - if errorDesc(err) != servers[0].port { - return fmt.Errorf("index %d: want peer %v, got peer %v", i, servers[0].port, err) - } - } - return nil -} - -func checkRoundRobin(cc *ClientConn, servers []*server) error { - var ( - req = "port" - reply string - err error - ) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure connections to all servers are up. - for i := 0; i < 2; i++ { - // Do this check twice, otherwise the first RPC's transport may still be - // picked by the closing pickfirst balancer, and the test becomes flaky. - for _, s := range servers { - var up bool - for i := 0; i < 5000; i++ { - if err = cc.Invoke(ctx, "/foo/bar", &req, &reply); errorDesc(err) == s.port { - up = true - break - } - time.Sleep(time.Millisecond) - } - if !up { - return fmt.Errorf("server %v is not up within 5 second", s.port) - } - } - } - - serverCount := len(servers) - for i := 0; i < 3*serverCount; i++ { - err = cc.Invoke(ctx, "/foo/bar", &req, &reply) - if errorDesc(err) != servers[i%serverCount].port { - return fmt.Errorf("index %d: want peer %v, got peer %v", i, servers[i%serverCount].port, err) - } - } - return nil -} - -func (s) TestSwitchBalancer(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - const numServers = 2 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - addrs := []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}} - r.UpdateState(resolver.State{Addresses: addrs}) - // The default balancer is pickfirst. - if err := checkPickFirst(cc, servers); err != nil { - t.Fatalf("check pickfirst returned non-nil error: %v", err) - } - // Switch to roundrobin. 
- cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`), Addresses: addrs}, nil) - if err := checkRoundRobin(cc, servers); err != nil { - t.Fatalf("check roundrobin returned non-nil error: %v", err) - } - // Switch to pickfirst. - cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "pick_first"}`), Addresses: addrs}, nil) - if err := checkPickFirst(cc, servers); err != nil { - t.Fatalf("check pickfirst returned non-nil error: %v", err) - } -} - -// Test that balancer specified by dial option will not be overridden. -func (s) TestBalancerDialOption(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - const numServers = 2 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{}), WithBalancerName(roundrobin.Name)) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - addrs := []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}} - r.UpdateState(resolver.State{Addresses: addrs}) - // The init balancer is roundrobin. - if err := checkRoundRobin(cc, servers); err != nil { - t.Fatalf("check roundrobin returned non-nil error: %v", err) - } - // Switch to pickfirst. - cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "pick_first"}`), Addresses: addrs}, nil) - // Balancer is still roundrobin. - if err := checkRoundRobin(cc, servers); err != nil { - t.Fatalf("check roundrobin returned non-nil error: %v", err) - } -} - -// First addr update contains grpclb. 
-func (s) TestSwitchBalancerGRPCLBFirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - - // ClientConn will switch balancer to grpclb when receives an address of - // type GRPCLB. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}, {Addr: "grpclb", Type: resolver.GRPCLB}}}) - var isGRPCLB bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) - } - - // New update containing new backend and new grpclb. Should not switch - // balancer. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend2"}, {Addr: "grpclb2", Type: resolver.GRPCLB}}}) - for i := 0; i < 200; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if !isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("within 200 ms, cc.balancer switched to !grpclb, want grpclb") - } - - var isPickFirst bool - // Switch balancer to pickfirst. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isPickFirst = cc.curBalancerName == PickFirstBalancerName - cc.mu.Unlock() - if isPickFirst { - break - } - time.Sleep(time.Millisecond) - } - if !isPickFirst { - t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) - } -} - -// First addr update does not contain grpclb. 
-func (s) TestSwitchBalancerGRPCLBSecond(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) - var isPickFirst bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isPickFirst = cc.curBalancerName == PickFirstBalancerName - cc.mu.Unlock() - if isPickFirst { - break - } - time.Sleep(time.Millisecond) - } - if !isPickFirst { - t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) - } - - // ClientConn will switch balancer to grpclb when receives an address of - // type GRPCLB. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}, {Addr: "grpclb", Type: resolver.GRPCLB}}}) - var isGRPCLB bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) - } - - // New update containing new backend and new grpclb. Should not switch - // balancer. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend2"}, {Addr: "grpclb2", Type: resolver.GRPCLB}}}) - for i := 0; i < 200; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if !isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("within 200 ms, cc.balancer switched to !grpclb, want grpclb") - } - - // Switch balancer back. 
- r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isPickFirst = cc.curBalancerName == PickFirstBalancerName - cc.mu.Unlock() - if isPickFirst { - break - } - time.Sleep(time.Millisecond) - } - if !isPickFirst { - t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) - } -} - -// Test that if the current balancer is roundrobin, after switching to grpclb, -// when the resolved address doesn't contain grpclb addresses, balancer will be -// switched back to roundrobin. -func (s) TestSwitchBalancerGRPCLBRoundRobin(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - - sc := parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`) - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}, ServiceConfig: sc}) - var isRoundRobin bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isRoundRobin = cc.curBalancerName == "round_robin" - cc.mu.Unlock() - if isRoundRobin { - break - } - time.Sleep(time.Millisecond) - } - if !isRoundRobin { - t.Fatalf("after 5 second, cc.balancer is of type %v, not round_robin", cc.curBalancerName) - } - - // ClientConn will switch balancer to grpclb when receives an address of - // type GRPCLB. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "grpclb", Type: resolver.GRPCLB}}, ServiceConfig: sc}) - var isGRPCLB bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) - } - - // Switch balancer back. 
- r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}, ServiceConfig: sc}) - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isRoundRobin = cc.curBalancerName == "round_robin" - cc.mu.Unlock() - if isRoundRobin { - break - } - time.Sleep(time.Millisecond) - } - if !isRoundRobin { - t.Fatalf("after 5 second, cc.balancer is of type %v, not round_robin", cc.curBalancerName) - } -} - -// Test that if resolved address list contains grpclb, the balancer option in -// service config won't take effect. But when there's no grpclb address in a new -// resolved address list, balancer will be switched to the new one. -func (s) TestSwitchBalancerGRPCLBServiceConfig(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}}) - var isPickFirst bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isPickFirst = cc.curBalancerName == PickFirstBalancerName - cc.mu.Unlock() - if isPickFirst { - break - } - time.Sleep(time.Millisecond) - } - if !isPickFirst { - t.Fatalf("after 5 second, cc.balancer is of type %v, not pick_first", cc.curBalancerName) - } - - // ClientConn will switch balancer to grpclb when receives an address of - // type GRPCLB. 
- addrs := []resolver.Address{{Addr: "grpclb", Type: resolver.GRPCLB}} - r.UpdateState(resolver.State{Addresses: addrs}) - var isGRPCLB bool - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isGRPCLB = cc.curBalancerName == "grpclb" - cc.mu.Unlock() - if isGRPCLB { - break - } - time.Sleep(time.Millisecond) - } - if !isGRPCLB { - t.Fatalf("after 5 second, cc.balancer is of type %v, not grpclb", cc.curBalancerName) - } - - sc := parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`) - r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc}) - var isRoundRobin bool - for i := 0; i < 200; i++ { - cc.mu.Lock() - isRoundRobin = cc.curBalancerName == "round_robin" - cc.mu.Unlock() - if isRoundRobin { - break - } - time.Sleep(time.Millisecond) - } - // Balancer should NOT switch to round_robin because resolved list contains - // grpclb. - if isRoundRobin { - t.Fatalf("within 200 ms, cc.balancer switched to round_robin, want grpclb") - } - - // Switch balancer back. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "backend"}}, ServiceConfig: sc}) - for i := 0; i < 5000; i++ { - cc.mu.Lock() - isRoundRobin = cc.curBalancerName == "round_robin" - cc.mu.Unlock() - if isRoundRobin { - break - } - time.Sleep(time.Millisecond) - } - if !isRoundRobin { - t.Fatalf("after 5 second, cc.balancer is of type %v, not round_robin", cc.curBalancerName) - } -} - -// Test that when switching to grpclb fails because grpclb is not registered, -// the fallback balancer will only get backend addresses, not the grpclb server -// address. -// -// The tests sends 3 server addresses (all backends) as resolved addresses, but -// claim the first one is grpclb server. The all RPCs should all be send to the -// other addresses, not the first one. 
-func (s) TestSwitchBalancerGRPCLBWithGRPCLBNotRegistered(t *testing.T) { - internal.BalancerUnregister("grpclb") - defer balancer.Register(&magicalLB{}) - - r := manual.NewBuilderWithScheme("whatever") - - const numServers = 3 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}}}) - // The default balancer is pickfirst. - if err := checkPickFirst(cc, servers[1:]); err != nil { - t.Fatalf("check pickfirst returned non-nil error: %v", err) - } - // Try switching to grpclb by sending servers[0] as grpclb address. It's - // expected that servers[0] will be filtered out, so it will not be used by - // the balancer. - // - // If the filtering failed, servers[0] will be used for RPCs and the RPCs - // will succeed. The following checks will catch this and fail. - addrs := []resolver.Address{ - {Addr: servers[0].addr, Type: resolver.GRPCLB}, - {Addr: servers[1].addr}, {Addr: servers[2].addr}} - r.UpdateState(resolver.State{Addresses: addrs}) - // Still check for pickfirst, but only with server[1] and server[2]. - if err := checkPickFirst(cc, servers[1:]); err != nil { - t.Fatalf("check pickfirst returned non-nil error: %v", err) - } - // Switch to roundrobin, and check against server[1] and server[2]. 
- cc.updateResolverState(resolver.State{ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`), Addresses: addrs}, nil) - if err := checkRoundRobin(cc, servers[1:]); err != nil { - t.Fatalf("check roundrobin returned non-nil error: %v", err) - } -} - -func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { - scpr := r.CC.ParseServiceConfig(s) - if scpr.Err != nil { - panic(fmt.Sprintf("Error parsing config %q: %v", s, scpr.Err)) - } - return scpr -} diff --git a/benchmark/benchmain/main.go b/benchmark/benchmain/main.go index 55427035f41b..f4b96a5d460b 100644 --- a/benchmark/benchmain/main.go +++ b/benchmark/benchmain/main.go @@ -21,10 +21,10 @@ Package main provides benchmark with setting flags. An example to run some benchmarks with profiling enabled: -go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \ - -compression=gzip -maxConcurrentCalls=1 -trace=off \ - -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \ - -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result + go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \ + -compression=gzip -maxConcurrentCalls=1 -trace=off \ + -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \ + -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result As a suggestion, when creating a branch, you can run this benchmark and save the result file "-resultFile=basePerf", and later when you at the middle of the work or finish the @@ -32,10 +32,11 @@ work, you can get the benchmark result and compare it with the base anytime. Assume there are two result files names as "basePerf" and "curPerf" created by adding -resultFile=basePerf and -resultFile=curPerf. 
- To format the curPerf, run: - go run benchmark/benchresult/main.go curPerf - To observe how the performance changes based on a base result, run: - go run benchmark/benchresult/main.go basePerf curPerf + + To format the curPerf, run: + go run benchmark/benchresult/main.go curPerf + To observe how the performance changes based on a base result, run: + go run benchmark/benchresult/main.go basePerf curPerf */ package main @@ -45,13 +46,14 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" + "math/rand" "net" "os" "reflect" "runtime" "runtime/pprof" + "strconv" "strings" "sync" "sync/atomic" @@ -63,6 +65,7 @@ import ( "google.golang.org/grpc/benchmark/flags" "google.golang.org/grpc/benchmark/latency" "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" @@ -79,7 +82,8 @@ var ( traceMode = flags.StringWithAllowedValues("trace", toggleModeOff, fmt.Sprintf("Trace mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes) preloaderMode = flags.StringWithAllowedValues("preloader", toggleModeOff, - fmt.Sprintf("Preloader mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes) + fmt.Sprintf("Preloader mode - One of: %v, preloader works only in streaming and unconstrained modes and will be ignored in unary mode", + strings.Join(allToggleModes, ", ")), allToggleModes) channelzOn = flags.StringWithAllowedValues("channelz", toggleModeOff, fmt.Sprintf("Channelz mode - One of: %v", strings.Join(allToggleModes, ", ")), allToggleModes) compressorMode = flags.StringWithAllowedValues("compression", compModeOff, @@ -104,6 +108,13 @@ var ( useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O") enableKeepalive = flag.Bool("enable_keepalive", false, "Enable client keepalive. 
\n"+ "Keepalive.Time is set to 10s, Keepalive.Timeout is set to 1s, Keepalive.PermitWithoutStream is set to true.") + clientReadBufferSize = flags.IntSlice("clientReadBufferSize", []int{-1}, "Configures the client read buffer size in bytes. If negative, use the default - may be a a comma-separated list") + clientWriteBufferSize = flags.IntSlice("clientWriteBufferSize", []int{-1}, "Configures the client write buffer size in bytes. If negative, use the default - may be a a comma-separated list") + serverReadBufferSize = flags.IntSlice("serverReadBufferSize", []int{-1}, "Configures the server read buffer size in bytes. If negative, use the default - may be a a comma-separated list") + serverWriteBufferSize = flags.IntSlice("serverWriteBufferSize", []int{-1}, "Configures the server write buffer size in bytes. If negative, use the default - may be a a comma-separated list") + sleepBetweenRPCs = flags.DurationSlice("sleepBetweenRPCs", []time.Duration{0}, "Configures the maximum amount of time the client should sleep between consecutive RPCs - may be a a comma-separated list") + connections = flag.Int("connections", 1, "The number of connections. Each connection will handle maxConcurrentCalls RPC streams") + recvBufferPool = flags.StringWithAllowedValues("recvBufferPool", recvBufferPoolNil, "Configures the shared receive buffer pool. 
One of: nil, simple, all", allRecvBufferPools) logger = grpclog.Component("benchmark") ) @@ -128,6 +139,10 @@ const ( networkModeLAN = "LAN" networkModeWAN = "WAN" networkLongHaul = "Longhaul" + // Shared recv buffer pool + recvBufferPoolNil = "nil" + recvBufferPoolSimple = "simple" + recvBufferPoolAll = "all" numStatsBuckets = 10 warmupCallCount = 10 @@ -139,6 +154,7 @@ var ( allCompModes = []string{compModeOff, compModeGzip, compModeNop, compModeAll} allToggleModes = []string{toggleModeOff, toggleModeOn, toggleModeBoth} allNetworkModes = []string{networkModeNone, networkModeLocal, networkModeLAN, networkModeWAN, networkLongHaul} + allRecvBufferPools = []string{recvBufferPoolNil, recvBufferPoolSimple, recvBufferPoolAll} defaultReadLatency = []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay. defaultReadKbps = []int{0, 10240} // if non-positive, infinite defaultReadMTU = []int{0} // if non-positive, infinite @@ -189,9 +205,9 @@ func runModesFromWorkloads(workload string) runModes { type startFunc func(mode string, bf stats.Features) type stopFunc func(count uint64) type ucStopFunc func(req uint64, resp uint64) -type rpcCallFunc func(pos int) -type rpcSendFunc func(pos int) -type rpcRecvFunc func(pos int) +type rpcCallFunc func(cn, pos int) +type rpcSendFunc func(cn, pos int) +type rpcRecvFunc func(cn, pos int) type rpcCleanupFunc func() func unaryBenchmark(start startFunc, stop stopFunc, bf stats.Features, s *stats.Stats) { @@ -206,7 +222,7 @@ func streamBenchmark(start startFunc, stop stopFunc, bf stats.Features, s *stats runBenchmark(caller, start, stop, bf, s, workloadsStreaming) } -func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Features, s *stats.Stats) { +func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf stats.Features) { var sender rpcSendFunc var recver rpcRecvFunc var cleanup rpcCleanupFunc @@ -228,40 +244,46 @@ func unconstrainedStreamBenchmark(start startFunc, stop ucStopFunc, bf 
stats.Fea bmEnd := time.Now().Add(bf.BenchTime + warmuptime) var wg sync.WaitGroup - wg.Add(2 * bf.MaxConcurrentCalls) - for i := 0; i < bf.MaxConcurrentCalls; i++ { - go func(pos int) { - defer wg.Done() - for { - t := time.Now() - if t.After(bmEnd) { - return + wg.Add(2 * bf.Connections * bf.MaxConcurrentCalls) + maxSleep := int(bf.SleepBetweenRPCs) + for cn := 0; cn < bf.Connections; cn++ { + for pos := 0; pos < bf.MaxConcurrentCalls; pos++ { + go func(cn, pos int) { + defer wg.Done() + for { + if maxSleep > 0 { + time.Sleep(time.Duration(rand.Intn(maxSleep))) + } + t := time.Now() + if t.After(bmEnd) { + return + } + sender(cn, pos) + atomic.AddUint64(&req, 1) } - sender(pos) - atomic.AddUint64(&req, 1) - } - }(i) - go func(pos int) { - defer wg.Done() - for { - t := time.Now() - if t.After(bmEnd) { - return + }(cn, pos) + go func(cn, pos int) { + defer wg.Done() + for { + t := time.Now() + if t.After(bmEnd) { + return + } + recver(cn, pos) + atomic.AddUint64(&resp, 1) } - recver(pos) - atomic.AddUint64(&resp, 1) - } - }(i) + }(cn, pos) + } } wg.Wait() stop(req, resp) } -// makeClient returns a gRPC client for the grpc.testing.BenchmarkService +// makeClients returns a gRPC client (or multiple clients) for the grpc.testing.BenchmarkService // service. The client is configured using the different options in the passed // 'bf'. Also returns a cleanup function to close the client and release // resources. 
-func makeClient(bf stats.Features) (testgrpc.BenchmarkServiceClient, func()) { +func makeClients(bf stats.Features) ([]testgrpc.BenchmarkServiceClient, func()) { nw := &latency.Network{Kbps: bf.Kbps, Latency: bf.Latency, MTU: bf.MTU} opts := []grpc.DialOption{} sopts := []grpc.ServerOption{} @@ -304,8 +326,30 @@ func makeClient(bf stats.Features) (testgrpc.BenchmarkServiceClient, func()) { }), ) } + if bf.ClientReadBufferSize >= 0 { + opts = append(opts, grpc.WithReadBufferSize(bf.ClientReadBufferSize)) + } + if bf.ClientWriteBufferSize >= 0 { + opts = append(opts, grpc.WithWriteBufferSize(bf.ClientWriteBufferSize)) + } + if bf.ServerReadBufferSize >= 0 { + sopts = append(sopts, grpc.ReadBufferSize(bf.ServerReadBufferSize)) + } + if bf.ServerWriteBufferSize >= 0 { + sopts = append(sopts, grpc.WriteBufferSize(bf.ServerWriteBufferSize)) + } + switch bf.RecvBufferPool { + case recvBufferPoolNil: + // Do nothing. + case recvBufferPoolSimple: + opts = append(opts, grpc.WithRecvBufferPool(grpc.NewSharedBufferPool())) + sopts = append(sopts, grpc.RecvBufferPool(grpc.NewSharedBufferPool())) + default: + logger.Fatalf("Unknown shared recv buffer pool type: %v", bf.RecvBufferPool) + } + sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(bf.MaxConcurrentCalls+1))) - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) var lis net.Listener if bf.UseBufConn { @@ -328,16 +372,24 @@ func makeClient(bf stats.Features) (testgrpc.BenchmarkServiceClient, func()) { } lis = nw.Listener(lis) stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...) - conn := bm.NewClientConn("" /* target not used */, opts...) 
- return testgrpc.NewBenchmarkServiceClient(conn), func() { - conn.Close() + conns := make([]*grpc.ClientConn, bf.Connections) + clients := make([]testgrpc.BenchmarkServiceClient, bf.Connections) + for cn := 0; cn < bf.Connections; cn++ { + conns[cn] = bm.NewClientConn("" /* target not used */, opts...) + clients[cn] = testgrpc.NewBenchmarkServiceClient(conns[cn]) + } + + return clients, func() { + for _, conn := range conns { + conn.Close() + } stopper() } } func makeFuncUnary(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { - tc, cleanup := makeClient(bf) - return func(int) { + clients, cleanup := makeClients(bf) + return func(cn, pos int) { reqSizeBytes := bf.ReqSizeBytes respSizeBytes := bf.RespSizeBytes if bf.ReqPayloadCurve != nil { @@ -346,23 +398,19 @@ func makeFuncUnary(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { if bf.RespPayloadCurve != nil { respSizeBytes = bf.RespPayloadCurve.ChooseRandom() } - unaryCaller(tc, reqSizeBytes, respSizeBytes) + unaryCaller(clients[cn], reqSizeBytes, respSizeBytes) }, cleanup } func makeFuncStream(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { - tc, cleanup := makeClient(bf) + streams, req, cleanup := setupStream(bf, false) - streams := make([]testgrpc.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls) - for i := 0; i < bf.MaxConcurrentCalls; i++ { - stream, err := tc.StreamingCall(context.Background()) - if err != nil { - logger.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) - } - streams[i] = stream + var preparedMsg [][]*grpc.PreparedMsg + if bf.EnablePreloader { + preparedMsg = prepareMessages(streams, req) } - return func(pos int) { + return func(cn, pos int) { reqSizeBytes := bf.ReqSizeBytes respSizeBytes := bf.RespSizeBytes if bf.ReqPayloadCurve != nil { @@ -371,51 +419,66 @@ func makeFuncStream(bf stats.Features) (rpcCallFunc, rpcCleanupFunc) { if bf.RespPayloadCurve != nil { respSizeBytes = bf.RespPayloadCurve.ChooseRandom() } - streamCaller(streams[pos], reqSizeBytes, respSizeBytes) + var 
req interface{} + if bf.EnablePreloader { + req = preparedMsg[cn][pos] + } else { + pl := bm.NewPayload(testpb.PayloadType_COMPRESSABLE, reqSizeBytes) + req = &testpb.SimpleRequest{ + ResponseType: pl.Type, + ResponseSize: int32(respSizeBytes), + Payload: pl, + } + } + streamCaller(streams[cn][pos], req) }, cleanup } func makeFuncUnconstrainedStreamPreloaded(bf stats.Features) (rpcSendFunc, rpcRecvFunc, rpcCleanupFunc) { - streams, req, cleanup := setupUnconstrainedStream(bf) + streams, req, cleanup := setupStream(bf, true) - preparedMsg := make([]*grpc.PreparedMsg, len(streams)) - for i, stream := range streams { - preparedMsg[i] = &grpc.PreparedMsg{} - err := preparedMsg[i].Encode(stream, req) - if err != nil { - logger.Fatalf("%v.Encode(%v, %v) = %v", preparedMsg[i], req, stream, err) - } - } + preparedMsg := prepareMessages(streams, req) - return func(pos int) { - streams[pos].SendMsg(preparedMsg[pos]) - }, func(pos int) { - streams[pos].Recv() + return func(cn, pos int) { + streams[cn][pos].SendMsg(preparedMsg[cn][pos]) + }, func(cn, pos int) { + streams[cn][pos].Recv() }, cleanup } func makeFuncUnconstrainedStream(bf stats.Features) (rpcSendFunc, rpcRecvFunc, rpcCleanupFunc) { - streams, req, cleanup := setupUnconstrainedStream(bf) + streams, req, cleanup := setupStream(bf, true) - return func(pos int) { - streams[pos].Send(req) - }, func(pos int) { - streams[pos].Recv() + return func(cn, pos int) { + streams[cn][pos].Send(req) + }, func(cn, pos int) { + streams[cn][pos].Recv() }, cleanup } -func setupUnconstrainedStream(bf stats.Features) ([]testgrpc.BenchmarkService_StreamingCallClient, *testpb.SimpleRequest, rpcCleanupFunc) { - tc, cleanup := makeClient(bf) +func setupStream(bf stats.Features, unconstrained bool) ([][]testgrpc.BenchmarkService_StreamingCallClient, *testpb.SimpleRequest, rpcCleanupFunc) { + clients, cleanup := makeClients(bf) - streams := make([]testgrpc.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls) - md := 
metadata.Pairs(benchmark.UnconstrainedStreamingHeader, "1") - ctx := metadata.NewOutgoingContext(context.Background(), md) - for i := 0; i < bf.MaxConcurrentCalls; i++ { - stream, err := tc.StreamingCall(ctx) - if err != nil { - logger.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + streams := make([][]testgrpc.BenchmarkService_StreamingCallClient, bf.Connections) + ctx := context.Background() + if unconstrained { + md := metadata.Pairs(benchmark.UnconstrainedStreamingHeader, "1", benchmark.UnconstrainedStreamingDelayHeader, bf.SleepBetweenRPCs.String()) + ctx = metadata.NewOutgoingContext(ctx, md) + } + if bf.EnablePreloader { + md := metadata.Pairs(benchmark.PreloadMsgSizeHeader, strconv.Itoa(bf.RespSizeBytes), benchmark.UnconstrainedStreamingDelayHeader, bf.SleepBetweenRPCs.String()) + ctx = metadata.NewOutgoingContext(ctx, md) + } + for cn := 0; cn < bf.Connections; cn++ { + tc := clients[cn] + streams[cn] = make([]testgrpc.BenchmarkService_StreamingCallClient, bf.MaxConcurrentCalls) + for pos := 0; pos < bf.MaxConcurrentCalls; pos++ { + stream, err := tc.StreamingCall(ctx) + if err != nil { + logger.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) + } + streams[cn][pos] = stream } - streams[i] = stream } pl := bm.NewPayload(testpb.PayloadType_COMPRESSABLE, bf.ReqSizeBytes) @@ -428,6 +491,20 @@ func setupUnconstrainedStream(bf stats.Features) ([]testgrpc.BenchmarkService_St return streams, req, cleanup } +func prepareMessages(streams [][]testgrpc.BenchmarkService_StreamingCallClient, req *testpb.SimpleRequest) [][]*grpc.PreparedMsg { + preparedMsg := make([][]*grpc.PreparedMsg, len(streams)) + for cn, connStreams := range streams { + preparedMsg[cn] = make([]*grpc.PreparedMsg, len(connStreams)) + for pos, stream := range connStreams { + preparedMsg[cn][pos] = &grpc.PreparedMsg{} + if err := preparedMsg[cn][pos].Encode(stream, req); err != nil { + logger.Fatalf("%v.Encode(%v, %v) = %v", preparedMsg[cn][pos], req, stream, err) + } + } + } + return preparedMsg +} + 
// Makes a UnaryCall gRPC request using the given BenchmarkServiceClient and // request and response sizes. func unaryCaller(client testgrpc.BenchmarkServiceClient, reqSize, respSize int) { @@ -436,39 +513,52 @@ func unaryCaller(client testgrpc.BenchmarkServiceClient, reqSize, respSize int) } } -func streamCaller(stream testgrpc.BenchmarkService_StreamingCallClient, reqSize, respSize int) { - if err := bm.DoStreamingRoundTrip(stream, reqSize, respSize); err != nil { +func streamCaller(stream testgrpc.BenchmarkService_StreamingCallClient, req interface{}) { + if err := bm.DoStreamingRoundTripPreloaded(stream, req); err != nil { logger.Fatalf("DoStreamingRoundTrip failed: %v", err) } } func runBenchmark(caller rpcCallFunc, start startFunc, stop stopFunc, bf stats.Features, s *stats.Stats, mode string) { - // Warm up connection. - for i := 0; i < warmupCallCount; i++ { - caller(0) + // if SleepBetweenRPCs > 0 we skip the warmup because otherwise + // we are going to send a set of simultaneous requests on every connection, + // which is something we are trying to avoid when using SleepBetweenRPCs. + if bf.SleepBetweenRPCs == 0 { + // Warm up connections. + for i := 0; i < warmupCallCount; i++ { + for cn := 0; cn < bf.Connections; cn++ { + caller(cn, 0) + } + } } // Run benchmark. 
start(mode, bf) var wg sync.WaitGroup - wg.Add(bf.MaxConcurrentCalls) + wg.Add(bf.Connections * bf.MaxConcurrentCalls) bmEnd := time.Now().Add(bf.BenchTime) + maxSleep := int(bf.SleepBetweenRPCs) var count uint64 - for i := 0; i < bf.MaxConcurrentCalls; i++ { - go func(pos int) { - defer wg.Done() - for { - t := time.Now() - if t.After(bmEnd) { - return + for cn := 0; cn < bf.Connections; cn++ { + for pos := 0; pos < bf.MaxConcurrentCalls; pos++ { + go func(cn, pos int) { + defer wg.Done() + for { + if maxSleep > 0 { + time.Sleep(time.Duration(rand.Intn(maxSleep))) + } + t := time.Now() + if t.After(bmEnd) { + return + } + start := time.Now() + caller(cn, pos) + elapse := time.Since(start) + atomic.AddUint64(&count, 1) + s.AddDuration(elapse) } - start := time.Now() - caller(pos) - elapse := time.Since(start) - atomic.AddUint64(&count, 1) - s.AddDuration(elapse) - } - }(i) + }(cn, pos) + } } wg.Wait() stop(count) @@ -486,6 +576,7 @@ type benchOpts struct { benchmarkResultFile string useBufconn bool enableKeepalive bool + connections int features *featureOpts } @@ -494,18 +585,24 @@ type benchOpts struct { // features through command line flags. We generate all possible combinations // for the provided values and run the benchmarks for each combination. 
type featureOpts struct { - enableTrace []bool - readLatencies []time.Duration - readKbps []int - readMTU []int - maxConcurrentCalls []int - reqSizeBytes []int - respSizeBytes []int - reqPayloadCurves []*stats.PayloadCurve - respPayloadCurves []*stats.PayloadCurve - compModes []string - enableChannelz []bool - enablePreloader []bool + enableTrace []bool + readLatencies []time.Duration + readKbps []int + readMTU []int + maxConcurrentCalls []int + reqSizeBytes []int + respSizeBytes []int + reqPayloadCurves []*stats.PayloadCurve + respPayloadCurves []*stats.PayloadCurve + compModes []string + enableChannelz []bool + enablePreloader []bool + clientReadBufferSize []int + clientWriteBufferSize []int + serverReadBufferSize []int + serverWriteBufferSize []int + sleepBetweenRPCs []time.Duration + recvBufferPools []string } // makeFeaturesNum returns a slice of ints of size 'maxFeatureIndex' where each @@ -542,6 +639,18 @@ func makeFeaturesNum(b *benchOpts) []int { featuresNum[i] = len(b.features.enableChannelz) case stats.EnablePreloaderIndex: featuresNum[i] = len(b.features.enablePreloader) + case stats.ClientReadBufferSize: + featuresNum[i] = len(b.features.clientReadBufferSize) + case stats.ClientWriteBufferSize: + featuresNum[i] = len(b.features.clientWriteBufferSize) + case stats.ServerReadBufferSize: + featuresNum[i] = len(b.features.serverReadBufferSize) + case stats.ServerWriteBufferSize: + featuresNum[i] = len(b.features.serverWriteBufferSize) + case stats.SleepBetweenRPCs: + featuresNum[i] = len(b.features.sleepBetweenRPCs) + case stats.RecvBufferPool: + featuresNum[i] = len(b.features.recvBufferPools) default: log.Fatalf("Unknown feature index %v in generateFeatures. 
maxFeatureIndex is %v", i, stats.MaxFeatureIndex) } @@ -595,15 +704,22 @@ func (b *benchOpts) generateFeatures(featuresNum []int) []stats.Features { UseBufConn: b.useBufconn, EnableKeepalive: b.enableKeepalive, BenchTime: b.benchTime, + Connections: b.connections, // These features can potentially change for each iteration. - EnableTrace: b.features.enableTrace[curPos[stats.EnableTraceIndex]], - Latency: b.features.readLatencies[curPos[stats.ReadLatenciesIndex]], - Kbps: b.features.readKbps[curPos[stats.ReadKbpsIndex]], - MTU: b.features.readMTU[curPos[stats.ReadMTUIndex]], - MaxConcurrentCalls: b.features.maxConcurrentCalls[curPos[stats.MaxConcurrentCallsIndex]], - ModeCompressor: b.features.compModes[curPos[stats.CompModesIndex]], - EnableChannelz: b.features.enableChannelz[curPos[stats.EnableChannelzIndex]], - EnablePreloader: b.features.enablePreloader[curPos[stats.EnablePreloaderIndex]], + EnableTrace: b.features.enableTrace[curPos[stats.EnableTraceIndex]], + Latency: b.features.readLatencies[curPos[stats.ReadLatenciesIndex]], + Kbps: b.features.readKbps[curPos[stats.ReadKbpsIndex]], + MTU: b.features.readMTU[curPos[stats.ReadMTUIndex]], + MaxConcurrentCalls: b.features.maxConcurrentCalls[curPos[stats.MaxConcurrentCallsIndex]], + ModeCompressor: b.features.compModes[curPos[stats.CompModesIndex]], + EnableChannelz: b.features.enableChannelz[curPos[stats.EnableChannelzIndex]], + EnablePreloader: b.features.enablePreloader[curPos[stats.EnablePreloaderIndex]], + ClientReadBufferSize: b.features.clientReadBufferSize[curPos[stats.ClientReadBufferSize]], + ClientWriteBufferSize: b.features.clientWriteBufferSize[curPos[stats.ClientWriteBufferSize]], + ServerReadBufferSize: b.features.serverReadBufferSize[curPos[stats.ServerReadBufferSize]], + ServerWriteBufferSize: b.features.serverWriteBufferSize[curPos[stats.ServerWriteBufferSize]], + SleepBetweenRPCs: b.features.sleepBetweenRPCs[curPos[stats.SleepBetweenRPCs]], + RecvBufferPool: 
b.features.recvBufferPools[curPos[stats.RecvBufferPool]], } if len(b.features.reqPayloadCurves) == 0 { f.ReqSizeBytes = b.features.reqSizeBytes[curPos[stats.ReqSizeBytesIndex]] @@ -659,17 +775,24 @@ func processFlags() *benchOpts { benchmarkResultFile: *benchmarkResultFile, useBufconn: *useBufconn, enableKeepalive: *enableKeepalive, + connections: *connections, features: &featureOpts{ - enableTrace: setToggleMode(*traceMode), - readLatencies: append([]time.Duration(nil), *readLatency...), - readKbps: append([]int(nil), *readKbps...), - readMTU: append([]int(nil), *readMTU...), - maxConcurrentCalls: append([]int(nil), *maxConcurrentCalls...), - reqSizeBytes: append([]int(nil), *readReqSizeBytes...), - respSizeBytes: append([]int(nil), *readRespSizeBytes...), - compModes: setCompressorMode(*compressorMode), - enableChannelz: setToggleMode(*channelzOn), - enablePreloader: setToggleMode(*preloaderMode), + enableTrace: setToggleMode(*traceMode), + readLatencies: append([]time.Duration(nil), *readLatency...), + readKbps: append([]int(nil), *readKbps...), + readMTU: append([]int(nil), *readMTU...), + maxConcurrentCalls: append([]int(nil), *maxConcurrentCalls...), + reqSizeBytes: append([]int(nil), *readReqSizeBytes...), + respSizeBytes: append([]int(nil), *readRespSizeBytes...), + compModes: setCompressorMode(*compressorMode), + enableChannelz: setToggleMode(*channelzOn), + enablePreloader: setToggleMode(*preloaderMode), + clientReadBufferSize: append([]int(nil), *clientReadBufferSize...), + clientWriteBufferSize: append([]int(nil), *clientWriteBufferSize...), + serverReadBufferSize: append([]int(nil), *serverReadBufferSize...), + serverWriteBufferSize: append([]int(nil), *serverWriteBufferSize...), + sleepBetweenRPCs: append([]time.Duration(nil), *sleepBetweenRPCs...), + recvBufferPools: setRecvBufferPool(*recvBufferPool), }, } @@ -681,6 +804,9 @@ func processFlags() *benchOpts { if len(opts.features.reqSizeBytes) != 0 { log.Fatalf("you may not specify 
-reqPayloadCurveFiles and -reqSizeBytes at the same time") } + if len(opts.features.enablePreloader) != 0 { + log.Fatalf("you may not specify -reqPayloadCurveFiles and -preloader at the same time") + } for _, file := range *reqPayloadCurveFiles { pc, err := stats.NewPayloadCurve(file) if err != nil { @@ -698,6 +824,9 @@ func processFlags() *benchOpts { if len(opts.features.respSizeBytes) != 0 { log.Fatalf("you may not specify -respPayloadCurveFiles and -respSizeBytes at the same time") } + if len(opts.features.enablePreloader) != 0 { + log.Fatalf("you may not specify -respPayloadCurveFiles and -preloader at the same time") + } for _, file := range *respPayloadCurveFiles { pc, err := stats.NewPayloadCurve(file) if err != nil { @@ -745,6 +874,19 @@ func setCompressorMode(val string) []string { } } +func setRecvBufferPool(val string) []string { + switch val { + case recvBufferPoolNil, recvBufferPoolSimple: + return []string{val} + case recvBufferPoolAll: + return []string{recvBufferPoolNil, recvBufferPoolSimple} + default: + // This should never happen because a wrong value passed to this flag would + // be caught during flag.Parse(). + return []string{} + } +} + func main() { opts := processFlags() before(opts) @@ -771,7 +913,7 @@ func main() { streamBenchmark(start, stop, bf, s) } if opts.rModes.unconstrained { - unconstrainedStreamBenchmark(start, ucStop, bf, s) + unconstrainedStreamBenchmark(start, ucStop, bf) } } after(opts, s.GetResults()) @@ -832,7 +974,7 @@ func (nopCompressor) Do(w io.Writer, p []byte) error { return err } if n != len(p) { - return fmt.Errorf("nopCompressor.Write: wrote %v bytes; want %v", n, len(p)) + return fmt.Errorf("nopCompressor.Write: wrote %d bytes; want %d", n, len(p)) } return nil } @@ -842,5 +984,5 @@ func (nopCompressor) Type() string { return compModeNop } // nopDecompressor is a decompressor that just copies data. 
type nopDecompressor struct{} -func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return ioutil.ReadAll(r) } +func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return io.ReadAll(r) } func (nopDecompressor) Type() string { return compModeNop } diff --git a/benchmark/benchmark.go b/benchmark/benchmark.go index a8ae40fa6ada..27101954aa30 100644 --- a/benchmark/benchmark.go +++ b/benchmark/benchmark.go @@ -26,7 +26,10 @@ import ( "fmt" "io" "log" + "math/rand" "net" + "strconv" + "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -77,13 +80,39 @@ func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (* // of ping-pong. const UnconstrainedStreamingHeader = "unconstrained-streaming" +// UnconstrainedStreamingDelayHeader is used to pass the maximum amount of time +// the server should sleep between consecutive RPC responses. +const UnconstrainedStreamingDelayHeader = "unconstrained-streaming-delay" + +// PreloadMsgSizeHeader indicates that the client is going to ask for +// a fixed response size and passes this size to the server. +// The server is expected to preload the response on startup. 
+const PreloadMsgSizeHeader = "preload-msg-size" + func (s *testServer) StreamingCall(stream testgrpc.BenchmarkService_StreamingCallServer) error { + preloadMsgSize := 0 + if md, ok := metadata.FromIncomingContext(stream.Context()); ok && len(md[PreloadMsgSizeHeader]) != 0 { + val := md[PreloadMsgSizeHeader][0] + var err error + preloadMsgSize, err = strconv.Atoi(val) + if err != nil { + return fmt.Errorf("%q header value is not an integer: %s", PreloadMsgSizeHeader, err) + } + } + if md, ok := metadata.FromIncomingContext(stream.Context()); ok && len(md[UnconstrainedStreamingHeader]) != 0 { - return s.UnconstrainedStreamingCall(stream) + return s.UnconstrainedStreamingCall(stream, preloadMsgSize) } response := &testpb.SimpleResponse{ Payload: new(testpb.Payload), } + preloadedResponse := &grpc.PreparedMsg{} + if preloadMsgSize > 0 { + setPayload(response.Payload, testpb.PayloadType_COMPRESSABLE, preloadMsgSize) + if err := preloadedResponse.Encode(stream, response); err != nil { + return err + } + } in := new(testpb.SimpleRequest) for { // use ServerStream directly to reuse the same testpb.SimpleRequest object @@ -95,14 +124,29 @@ func (s *testServer) StreamingCall(stream testgrpc.BenchmarkService_StreamingCal if err != nil { return err } - setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) - if err := stream.Send(response); err != nil { + if preloadMsgSize > 0 { + err = stream.SendMsg(preloadedResponse) + } else { + setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) + err = stream.Send(response) + } + if err != nil { return err } } } -func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService_StreamingCallServer) error { +func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService_StreamingCallServer, preloadMsgSize int) error { + maxSleep := 0 + if md, ok := metadata.FromIncomingContext(stream.Context()); ok && len(md[UnconstrainedStreamingDelayHeader]) != 0 { + val := 
md[UnconstrainedStreamingDelayHeader][0] + d, err := time.ParseDuration(val) + if err != nil { + return fmt.Errorf("can't parse %q header: %s", UnconstrainedStreamingDelayHeader, err) + } + maxSleep = int(d) + } + in := new(testpb.SimpleRequest) // Receive a message to learn response type and size. err := stream.RecvMsg(in) @@ -119,6 +163,13 @@ func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService } setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) + preloadedResponse := &grpc.PreparedMsg{} + if preloadMsgSize > 0 { + if err := preloadedResponse.Encode(stream, response); err != nil { + return err + } + } + go func() { for { // Using RecvMsg rather than Recv to prevent reallocation of SimpleRequest. @@ -135,9 +186,17 @@ func (s *testServer) UnconstrainedStreamingCall(stream testgrpc.BenchmarkService go func() { for { - err := stream.Send(response) + if maxSleep > 0 { + time.Sleep(time.Duration(rand.Intn(maxSleep))) + } + var err error + if preloadMsgSize > 0 { + err = stream.SendMsg(preloadedResponse) + } else { + err = stream.Send(response) + } switch status.Code(err) { - case codes.Unavailable: + case codes.Unavailable, codes.Canceled: return case codes.OK: default: @@ -198,8 +257,6 @@ type ServerInfo struct { // StartServer starts a gRPC server serving a benchmark service according to info. // It returns a function to stop the server. func StartServer(info ServerInfo, opts ...grpc.ServerOption) func() { - opts = append(opts, grpc.WriteBufferSize(128*1024)) - opts = append(opts, grpc.ReadBufferSize(128*1024)) s := grpc.NewServer(opts...) switch info.Type { case "protobuf": @@ -241,7 +298,13 @@ func DoStreamingRoundTrip(stream testgrpc.BenchmarkService_StreamingCallClient, ResponseSize: int32(respSize), Payload: pl, } - if err := stream.Send(req); err != nil { + return DoStreamingRoundTripPreloaded(stream, req) +} + +// DoStreamingRoundTripPreloaded performs a round trip for a single streaming rpc with preloaded payload. 
+func DoStreamingRoundTripPreloaded(stream testgrpc.BenchmarkService_StreamingCallClient, req interface{}) error { + // req could be either *testpb.SimpleRequest or *grpc.PreparedMsg + if err := stream.SendMsg(req); err != nil { return fmt.Errorf("/BenchmarkService/StreamingCall.Send(_) = %v, want ", err) } if _, err := stream.Recv(); err != nil { @@ -278,11 +341,9 @@ func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { // NewClientConnWithContext creates a gRPC client connection to addr using ctx. func NewClientConnWithContext(ctx context.Context, addr string, opts ...grpc.DialOption) *grpc.ClientConn { - opts = append(opts, grpc.WithWriteBufferSize(128*1024)) - opts = append(opts, grpc.WithReadBufferSize(128*1024)) conn, err := grpc.DialContext(ctx, addr, opts...) if err != nil { - logger.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) + logger.Fatalf("NewClientConn(%q) failed to create a ClientConn: %v", addr, err) } return conn } diff --git a/benchmark/benchresult/main.go b/benchmark/benchresult/main.go index 587a0f6bda32..5bd9ce6ff891 100644 --- a/benchmark/benchresult/main.go +++ b/benchmark/benchresult/main.go @@ -18,12 +18,14 @@ /* To format the benchmark result: - go run benchmark/benchresult/main.go resultfile + + go run benchmark/benchresult/main.go resultfile To see the performance change based on a old result: - go run benchmark/benchresult/main.go resultfile_old resultfile -It will print the comparison result of intersection benchmarks between two files. + go run benchmark/benchresult/main.go resultfile_old resultfile + +It will print the comparison result of intersection benchmarks between two files. 
*/ package main diff --git a/benchmark/client/main.go b/benchmark/client/main.go index caf2db70a501..395caf39ea76 100644 --- a/benchmark/client/main.go +++ b/benchmark/client/main.go @@ -51,6 +51,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/benchmark" "google.golang.org/grpc/benchmark/stats" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/syscall" @@ -85,7 +86,7 @@ var ( func main() { flag.Parse() if *testName == "" { - logger.Fatalf("test_name not set") + logger.Fatal("-test_name not set") } req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, @@ -135,7 +136,7 @@ func main() { func buildConnections(ctx context.Context) []*grpc.ClientConn { ccs := make([]*grpc.ClientConn, *numConn) for i := range ccs { - ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithInsecure(), grpc.WithBlock()) + ccs[i] = benchmark.NewClientConnWithContext(ctx, "localhost:"+*port, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) } return ccs } diff --git a/benchmark/latency/latency_test.go b/benchmark/latency/latency_test.go index 5d08b90b4fa2..787373ca30be 100644 --- a/benchmark/latency/latency_test.go +++ b/benchmark/latency/latency_test.go @@ -86,7 +86,7 @@ func (s) TestConn(t *testing.T) { wantSleeps(latency) // Connection creation delay. // 1 kbps = 128 Bps. Divides evenly by 1 second using nanos. 
- byteLatency := time.Duration(time.Second / 128) + byteLatency := time.Second / 128 write := func(b []byte) { n, err := c.Write(b) diff --git a/benchmark/primitives/code_string_test.go b/benchmark/primitives/code_string_test.go index 51b1ee48cf3c..095d0045c188 100644 --- a/benchmark/primitives/code_string_test.go +++ b/benchmark/primitives/code_string_test.go @@ -87,49 +87,39 @@ func (i codeBench) StringUsingMap() string { } func BenchmarkCodeStringStringer(b *testing.B) { - b.ResetTimer() for i := 0; i < b.N; i++ { c := codeBench(uint32(i % 17)) _ = c.String() } - b.StopTimer() } func BenchmarkCodeStringMap(b *testing.B) { - b.ResetTimer() for i := 0; i < b.N; i++ { c := codeBench(uint32(i % 17)) _ = c.StringUsingMap() } - b.StopTimer() } // codes.Code.String() does a switch. func BenchmarkCodeStringSwitch(b *testing.B) { - b.ResetTimer() for i := 0; i < b.N; i++ { c := codes.Code(uint32(i % 17)) _ = c.String() } - b.StopTimer() } // Testing all codes (0<=c<=16) and also one overflow (17). func BenchmarkCodeStringStringerWithOverflow(b *testing.B) { - b.ResetTimer() for i := 0; i < b.N; i++ { c := codeBench(uint32(i % 18)) _ = c.String() } - b.StopTimer() } // Testing all codes (0<=c<=16) and also one overflow (17). func BenchmarkCodeStringSwitchWithOverflow(b *testing.B) { - b.ResetTimer() for i := 0; i < b.N; i++ { c := codes.Code(uint32(i % 18)) _ = c.String() } - b.StopTimer() } diff --git a/benchmark/server/main.go b/benchmark/server/main.go index 5a82b1c78012..144d090b9c4a 100644 --- a/benchmark/server/main.go +++ b/benchmark/server/main.go @@ -20,6 +20,7 @@ Package main provides a server used for benchmarking. It launches a server which is listening on port 50051. 
An example to start the server can be found at: + go run benchmark/server/main.go -test_name=grpc_test After starting the server, the client can be run separately and used to test @@ -53,7 +54,7 @@ var ( func main() { flag.Parse() if *testName == "" { - logger.Fatalf("test name not set") + logger.Fatal("-test_name not set") } lis, err := net.Listen("tcp", ":"+*port) if err != nil { diff --git a/benchmark/stats/curve.go b/benchmark/stats/curve.go index 68821bcc2690..124183dac2ea 100644 --- a/benchmark/stats/curve.go +++ b/benchmark/stats/curve.go @@ -23,7 +23,6 @@ import ( "encoding/csv" "encoding/hex" "fmt" - "io/ioutil" "math" "math/rand" "os" @@ -81,7 +80,7 @@ func (pcr *payloadCurveRange) chooseRandom() int { // sha256file is a helper function that returns a hex string matching the // SHA-256 sum of the input file. func sha256file(file string) (string, error) { - data, err := ioutil.ReadFile(file) + data, err := os.ReadFile(file) if err != nil { return "", err } diff --git a/benchmark/stats/histogram.go b/benchmark/stats/histogram.go index f038d26ed0aa..461135f0125c 100644 --- a/benchmark/stats/histogram.go +++ b/benchmark/stats/histogram.go @@ -118,10 +118,6 @@ func (h *Histogram) PrintWithUnit(w io.Writer, unit float64) { } maxBucketDigitLen := len(strconv.FormatFloat(h.Buckets[len(h.Buckets)-1].LowBound, 'f', 6, 64)) - if maxBucketDigitLen < 3 { - // For "inf". 
- maxBucketDigitLen = 3 - } maxCountDigitLen := len(strconv.FormatInt(h.Count, 10)) percentMulti := 100 / float64(h.Count) @@ -131,9 +127,9 @@ func (h *Histogram) PrintWithUnit(w io.Writer, unit float64) { if i+1 < len(h.Buckets) { fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound/unit) } else { - fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf") + upperBound := float64(h.opts.MinValue) + (b.LowBound-float64(h.opts.MinValue))*(1.0+h.opts.GrowthFactor) + fmt.Fprintf(w, "%*f)", maxBucketDigitLen, upperBound/unit) } - accCount += b.Count fmt.Fprintf(w, " %*d %5.1f%% %5.1f%%", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti) @@ -188,6 +184,9 @@ func (h *Histogram) Add(value int64) error { func (h *Histogram) findBucket(value int64) (int, error) { delta := float64(value - h.opts.MinValue) + if delta < 0 { + return 0, fmt.Errorf("no bucket for value: %d", value) + } var b int if delta >= h.opts.BaseBucketSize { // b = log_{1+growthFactor} (delta / baseBucketSize) + 1 diff --git a/benchmark/stats/stats.go b/benchmark/stats/stats.go index 6275c3c3a71c..3989e25dbf4b 100644 --- a/benchmark/stats/stats.go +++ b/benchmark/stats/stats.go @@ -52,6 +52,12 @@ const ( CompModesIndex EnableChannelzIndex EnablePreloaderIndex + ClientReadBufferSize + ClientWriteBufferSize + ServerReadBufferSize + ServerWriteBufferSize + SleepBetweenRPCs + RecvBufferPool // MaxFeatureIndex is a place holder to indicate the total number of feature // indices we have. Any new feature indices should be added above this. @@ -74,6 +80,8 @@ type Features struct { EnableKeepalive bool // BenchTime indicates the duration of the benchmark run. BenchTime time.Duration + // Connections configures the number of grpc connections between client and server. 
+ Connections int // Features defined above are usually the same for all benchmark runs in a // particular invocation, while the features defined below could vary from @@ -109,6 +117,18 @@ type Features struct { EnableChannelz bool // EnablePreloader indicates if preloading was turned on. EnablePreloader bool + // ClientReadBufferSize is the size of the client read buffer in bytes. If negative, use the default buffer size. + ClientReadBufferSize int + // ClientWriteBufferSize is the size of the client write buffer in bytes. If negative, use the default buffer size. + ClientWriteBufferSize int + // ServerReadBufferSize is the size of the server read buffer in bytes. If negative, use the default buffer size. + ServerReadBufferSize int + // ServerWriteBufferSize is the size of the server write buffer in bytes. If negative, use the default buffer size. + ServerWriteBufferSize int + // SleepBetweenRPCs configures optional delay between RPCs. + SleepBetweenRPCs time.Duration + // RecvBufferPool represents the shared recv buffer pool used. + RecvBufferPool string } // String returns all the feature values as a string. 
@@ -126,10 +146,15 @@ func (f Features) String() string { } return fmt.Sprintf("networkMode_%v-bufConn_%v-keepalive_%v-benchTime_%v-"+ "trace_%v-latency_%v-kbps_%v-MTU_%v-maxConcurrentCalls_%v-%s-%s-"+ - "compressor_%v-channelz_%v-preloader_%v", + "compressor_%v-channelz_%v-preloader_%v-clientReadBufferSize_%v-"+ + "clientWriteBufferSize_%v-serverReadBufferSize_%v-serverWriteBufferSize_%v-"+ + "sleepBetweenRPCs_%v-connections_%v-recvBufferPool_%v-", f.NetworkMode, f.UseBufConn, f.EnableKeepalive, f.BenchTime, f.EnableTrace, f.Latency, f.Kbps, f.MTU, f.MaxConcurrentCalls, reqPayloadString, - respPayloadString, f.ModeCompressor, f.EnableChannelz, f.EnablePreloader) + respPayloadString, f.ModeCompressor, f.EnableChannelz, f.EnablePreloader, + f.ClientReadBufferSize, f.ClientWriteBufferSize, f.ServerReadBufferSize, + f.ServerWriteBufferSize, f.SleepBetweenRPCs, f.Connections, + f.RecvBufferPool) } // SharedFeatures returns the shared features as a pretty printable string. @@ -193,6 +218,18 @@ func (f Features) partialString(b *bytes.Buffer, wantFeatures []bool, sep, delim b.WriteString(fmt.Sprintf("Channelz%v%v%v", sep, f.EnableChannelz, delim)) case EnablePreloaderIndex: b.WriteString(fmt.Sprintf("Preloader%v%v%v", sep, f.EnablePreloader, delim)) + case ClientReadBufferSize: + b.WriteString(fmt.Sprintf("ClientReadBufferSize%v%v%v", sep, f.ClientReadBufferSize, delim)) + case ClientWriteBufferSize: + b.WriteString(fmt.Sprintf("ClientWriteBufferSize%v%v%v", sep, f.ClientWriteBufferSize, delim)) + case ServerReadBufferSize: + b.WriteString(fmt.Sprintf("ServerReadBufferSize%v%v%v", sep, f.ServerReadBufferSize, delim)) + case ServerWriteBufferSize: + b.WriteString(fmt.Sprintf("ServerWriteBufferSize%v%v%v", sep, f.ServerWriteBufferSize, delim)) + case SleepBetweenRPCs: + b.WriteString(fmt.Sprintf("SleepBetweenRPCs%v%v%v", sep, f.SleepBetweenRPCs, delim)) + case RecvBufferPool: + b.WriteString(fmt.Sprintf("RecvBufferPool%v%v%v", sep, f.RecvBufferPool, delim)) default: 
log.Fatalf("Unknown feature index %v. maxFeatureIndex is %v", i, MaxFeatureIndex) } diff --git a/benchmark/worker/benchmark_client.go b/benchmark/worker/benchmark_client.go index 43af38dc5f78..cc1af4f418a5 100644 --- a/benchmark/worker/benchmark_client.go +++ b/benchmark/worker/benchmark_client.go @@ -31,12 +31,16 @@ import ( "google.golang.org/grpc/benchmark/stats" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/status" "google.golang.org/grpc/testdata" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" + + _ "google.golang.org/grpc/xds" // To install the xds resolvers and balancers. ) var caFile = flag.String("ca_file", "", "The file containing the CA root cert file") @@ -129,11 +133,11 @@ func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error } creds, err := credentials.NewClientTLSFromFile(*caFile, config.SecurityParams.ServerHostOverride) if err != nil { - return nil, nil, status.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err) + return nil, nil, status.Errorf(codes.InvalidArgument, "failed to create TLS credentials: %v", err) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } // Use byteBufCodec if it is required. @@ -182,11 +186,21 @@ func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benc } } - // TODO add open loop distribution. - switch config.LoadParams.Load.(type) { + // If set, perform an open loop, if not perform a closed loop. An open loop + // asynchronously starts RPCs based on random start times derived from a + // Poisson distribution. 
A closed loop performs RPCs in a blocking manner, + // and runs the next RPC after the previous RPC completes and returns. + var poissonLambda *float64 + switch t := config.LoadParams.Load.(type) { case *testpb.LoadParams_ClosedLoop: case *testpb.LoadParams_Poisson: - return status.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams) + if t.Poisson == nil { + return status.Errorf(codes.InvalidArgument, "poisson is nil, needs to be set") + } + if t.Poisson.OfferedLoad <= 0 { + return status.Errorf(codes.InvalidArgument, "poisson.offered is <= 0: %v, needs to be >0", t.Poisson.OfferedLoad) + } + poissonLambda = &t.Poisson.OfferedLoad default: return status.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams) } @@ -195,11 +209,9 @@ func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benc switch config.RpcType { case testpb.RpcType_UNARY: - bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize) - // TODO open loop. + bc.unaryLoop(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, poissonLambda) case testpb.RpcType_STREAMING: - bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType) - // TODO open loop. + bc.streamingLoop(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType, poissonLambda) default: return status.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType) } @@ -243,7 +255,7 @@ func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) return bc, nil } -func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) { +func (bc *benchmarkClient) unaryLoop(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, poissonLambda *float64) { for ic, conn := range conns { client := testgrpc.NewBenchmarkServiceClient(conn) // For each connection, create rpcCountPerConn goroutines to do rpc. 
@@ -257,36 +269,44 @@ func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPe // Now relying on worker client to reserve time to do warm up. // The worker client needs to wait for some time after client is created, // before starting benchmark. - done := make(chan bool) - for { - go func() { - start := time.Now() - if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { + if poissonLambda == nil { // Closed loop. + done := make(chan bool) + for { + go func() { + start := time.Now() + if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { + select { + case <-bc.stop: + case done <- false: + } + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) select { case <-bc.stop: - case done <- false: + case done <- true: } - return - } - elapse := time.Since(start) - bc.lockingHistograms[idx].add(int64(elapse)) + }() select { case <-bc.stop: - case done <- true: + return + case <-done: } - }() - select { - case <-bc.stop: - return - case <-done: } + } else { // Open loop. + timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / *poissonLambda) * float64(time.Second)) + time.AfterFunc(timeBetweenRPCs, func() { + bc.poissonUnary(client, idx, reqSize, respSize, *poissonLambda) + }) } + }(idx) } } } -func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) { +func (bc *benchmarkClient) streamingLoop(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string, poissonLambda *float64) { var doRPC func(testgrpc.BenchmarkService_StreamingCallClient, int, int) error if payloadType == "bytebuf" { doRPC = benchmark.DoByteBufStreamingRoundTrip @@ -301,33 +321,69 @@ func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCou if err != nil { logger.Fatalf("%v.StreamingCall(_) = _, %v", c, err) } - // Create histogram for each goroutine. 
idx := ic*rpcCountPerConn + j bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions) - // Start goroutine on the created mutex and histogram. - go func(idx int) { - // TODO: do warm up if necessary. - // Now relying on worker client to reserve time to do warm up. - // The worker client needs to wait for some time after client is created, - // before starting benchmark. - for { - start := time.Now() - if err := doRPC(stream, reqSize, respSize); err != nil { - return - } - elapse := time.Since(start) - bc.lockingHistograms[idx].add(int64(elapse)) - select { - case <-bc.stop: - return - default: + if poissonLambda == nil { // Closed loop. + // Start goroutine on the created mutex and histogram. + go func(idx int) { + // TODO: do warm up if necessary. + // Now relying on worker client to reserve time to do warm up. + // The worker client needs to wait for some time after client is created, + // before starting benchmark. + for { + start := time.Now() + if err := doRPC(stream, reqSize, respSize); err != nil { + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) + select { + case <-bc.stop: + return + default: + } } - } - }(idx) + }(idx) + } else { // Open loop. 
+ timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / *poissonLambda) * float64(time.Second)) + time.AfterFunc(timeBetweenRPCs, func() { + bc.poissonStreaming(stream, idx, reqSize, respSize, *poissonLambda, doRPC) + }) + } } } } +func (bc *benchmarkClient) poissonUnary(client testgrpc.BenchmarkServiceClient, idx int, reqSize int, respSize int, lambda float64) { + go func() { + start := time.Now() + if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil { + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) + }() + timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / lambda) * float64(time.Second)) + time.AfterFunc(timeBetweenRPCs, func() { + bc.poissonUnary(client, idx, reqSize, respSize, lambda) + }) +} + +func (bc *benchmarkClient) poissonStreaming(stream testgrpc.BenchmarkService_StreamingCallClient, idx int, reqSize int, respSize int, lambda float64, doRPC func(testgrpc.BenchmarkService_StreamingCallClient, int, int) error) { + go func() { + start := time.Now() + if err := doRPC(stream, reqSize, respSize); err != nil { + return + } + elapse := time.Since(start) + bc.lockingHistograms[idx].add(int64(elapse)) + }() + timeBetweenRPCs := time.Duration((grpcrand.ExpFloat64() / lambda) * float64(time.Second)) + time.AfterFunc(timeBetweenRPCs, func() { + bc.poissonStreaming(stream, idx, reqSize, respSize, lambda, doRPC) + }) +} + // getStats returns the stats for benchmark client. // It resets lastResetTime and all histograms if argument reset is true. 
func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats { diff --git a/benchmark/worker/benchmark_server.go b/benchmark/worker/benchmark_server.go index da6288c11de4..f3ae4d08d836 100644 --- a/benchmark/worker/benchmark_server.go +++ b/benchmark/worker/benchmark_server.go @@ -101,7 +101,7 @@ func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchma } creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { - logger.Fatalf("failed to generate credentials %v", err) + logger.Fatalf("failed to generate credentials: %v", err) } opts = append(opts, grpc.Creds(creds)) } diff --git a/binarylog/binarylog_end2end_test.go b/binarylog/binarylog_end2end_test.go index 61eeb68edae8..66bb7bda3af4 100644 --- a/binarylog/binarylog_end2end_test.go +++ b/binarylog/binarylog_end2end_test.go @@ -31,13 +31,14 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/binarylog" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" iblog "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" ) @@ -63,10 +64,10 @@ var testSink = &testBinLogSink{} type testBinLogSink struct { mu sync.Mutex - buf []*pb.GrpcLogEntry + buf []*binlogpb.GrpcLogEntry } -func (s *testBinLogSink) Write(e *pb.GrpcLogEntry) error { +func (s *testBinLogSink) Write(e *binlogpb.GrpcLogEntry) error { s.mu.Lock() s.buf = append(s.buf, e) s.mu.Unlock() @@ -77,12 +78,12 @@ func (s *testBinLogSink) Close() error { return nil } // Returns all client entris if client is true, otherwise return all server // entries. 
-func (s *testBinLogSink) logEntries(client bool) []*pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_SERVER +func (s *testBinLogSink) logEntries(client bool) []*binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_SERVER if client { - logger = pb.GrpcLogEntry_LOGGER_CLIENT + logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } - var ret []*pb.GrpcLogEntry + var ret []*binlogpb.GrpcLogEntry s.mu.Lock() for _, e := range s.buf { if e.Logger == logger { @@ -252,13 +253,10 @@ func (te *test) tearDown() { te.srv.Stop() } -type testConfig struct { -} - // newTest returns a new test using the provided testing.T and // environment. It is returned with default values. Tests should // modify it before calling its startServer and clientConn methods. -func newTest(t *testing.T, tc *testConfig) *test { +func newTest(t *testing.T) *test { te := &test{ t: t, } @@ -313,7 +311,7 @@ func (te *test) clientConn() *grpc.ClientConn { if te.cc != nil { return te.cc } - opts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithBlock()} + opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()} var err error te.cc, err = grpc.Dial(te.srvAddr, opts...) 
@@ -483,31 +481,31 @@ type expectedData struct { err error } -func (ed *expectedData) newClientHeaderEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_CLIENT - var peer *pb.Address +func (ed *expectedData) newClientHeaderEntry(client bool, rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT + var peer *binlogpb.Address if !client { - logger = pb.GrpcLogEntry_LOGGER_SERVER + logger = binlogpb.GrpcLogEntry_LOGGER_SERVER ed.te.clientAddrMu.Lock() - peer = &pb.Address{ + peer = &binlogpb.Address{ Address: ed.te.clientIP.String(), IpPort: uint32(ed.te.clientPort), } if ed.te.clientIP.To4() != nil { - peer.Type = pb.Address_TYPE_IPV4 + peer.Type = binlogpb.Address_TYPE_IPV4 } else { - peer.Type = pb.Address_TYPE_IPV6 + peer.Type = binlogpb.Address_TYPE_IPV6 } ed.te.clientAddrMu.Unlock() } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, Logger: logger, - Payload: &pb.GrpcLogEntry_ClientHeader{ - ClientHeader: &pb.ClientHeader{ + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: &binlogpb.ClientHeader{ Metadata: iblog.MdToMetadataProto(testMetadata), MethodName: ed.method, Authority: ed.te.srvAddr, @@ -517,29 +515,29 @@ func (ed *expectedData) newClientHeaderEntry(client bool, rpcID, inRPCID uint64) } } -func (ed *expectedData) newServerHeaderEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_SERVER - var peer *pb.Address +func (ed *expectedData) newServerHeaderEntry(client bool, rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_SERVER + var peer *binlogpb.Address if client { - logger = pb.GrpcLogEntry_LOGGER_CLIENT - peer = &pb.Address{ + logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + peer = &binlogpb.Address{ 
Address: ed.te.srvIP.String(), IpPort: uint32(ed.te.srvPort), } if ed.te.srvIP.To4() != nil { - peer.Type = pb.Address_TYPE_IPV4 + peer.Type = binlogpb.Address_TYPE_IPV4 } else { - peer.Type = pb.Address_TYPE_IPV6 + peer.Type = binlogpb.Address_TYPE_IPV6 } } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, Logger: logger, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: iblog.MdToMetadataProto(testMetadata), }, }, @@ -547,23 +545,23 @@ func (ed *expectedData) newServerHeaderEntry(client bool, rpcID, inRPCID uint64) } } -func (ed *expectedData) newClientMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_CLIENT +func (ed *expectedData) newClientMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT if !client { - logger = pb.GrpcLogEntry_LOGGER_SERVER + logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } data, err := proto.Marshal(msg) if err != nil { grpclogLogger.Infof("binarylogging_testing: failed to marshal proto message: %v", err) } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, Logger: logger, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, @@ -571,23 +569,23 @@ func (ed *expectedData) newClientMessageEntry(client bool, rpcID, inRPCID uint64 } } -func (ed *expectedData) newServerMessageEntry(client bool, 
rpcID, inRPCID uint64, msg proto.Message) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_CLIENT +func (ed *expectedData) newServerMessageEntry(client bool, rpcID, inRPCID uint64, msg proto.Message) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT if !client { - logger = pb.GrpcLogEntry_LOGGER_SERVER + logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } data, err := proto.Marshal(msg) if err != nil { grpclogLogger.Infof("binarylogging_testing: failed to marshal proto message: %v", err) } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, Logger: logger, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, @@ -595,34 +593,34 @@ func (ed *expectedData) newServerMessageEntry(client bool, rpcID, inRPCID uint64 } } -func (ed *expectedData) newHalfCloseEntry(client bool, rpcID, inRPCID uint64) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_CLIENT +func (ed *expectedData) newHalfCloseEntry(client bool, rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_CLIENT if !client { - logger = pb.GrpcLogEntry_LOGGER_SERVER + logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. 
Logger: logger, } } -func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64, stErr error) *pb.GrpcLogEntry { - logger := pb.GrpcLogEntry_LOGGER_SERVER - var peer *pb.Address +func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64, stErr error) *binlogpb.GrpcLogEntry { + logger := binlogpb.GrpcLogEntry_LOGGER_SERVER + var peer *binlogpb.Address if client { - logger = pb.GrpcLogEntry_LOGGER_CLIENT - peer = &pb.Address{ + logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + peer = &binlogpb.Address{ Address: ed.te.srvIP.String(), IpPort: uint32(ed.te.srvPort), } if ed.te.srvIP.To4() != nil { - peer.Type = pb.Address_TYPE_IPV4 + peer.Type = binlogpb.Address_TYPE_IPV4 } else { - peer.Type = pb.Address_TYPE_IPV6 + peer.Type = binlogpb.Address_TYPE_IPV6 } } st, ok := status.FromError(stErr) @@ -640,14 +638,14 @@ func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64 grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - return &pb.GrpcLogEntry{ + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, Logger: logger, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: iblog.MdToMetadataProto(testTrailerMetadata), // st will be nil if err was not a status error, but nil is ok. 
StatusCode: uint32(st.Code()), @@ -659,20 +657,20 @@ func (ed *expectedData) newServerTrailerEntry(client bool, rpcID, inRPCID uint64 } } -func (ed *expectedData) newCancelEntry(rpcID, inRPCID uint64) *pb.GrpcLogEntry { - return &pb.GrpcLogEntry{ +func (ed *expectedData) newCancelEntry(rpcID, inRPCID uint64) *binlogpb.GrpcLogEntry { + return &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: rpcID, SequenceIdWithinCall: inRPCID, - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, Payload: nil, } } -func (ed *expectedData) toClientLogEntries() []*pb.GrpcLogEntry { +func (ed *expectedData) toClientLogEntries() []*binlogpb.GrpcLogEntry { var ( - ret []*pb.GrpcLogEntry + ret []*binlogpb.GrpcLogEntry idInRPC uint64 = 1 ) ret = append(ret, ed.newClientHeaderEntry(true, globalRPCID, idInRPC)) @@ -728,9 +726,9 @@ func (ed *expectedData) toClientLogEntries() []*pb.GrpcLogEntry { return ret } -func (ed *expectedData) toServerLogEntries() []*pb.GrpcLogEntry { +func (ed *expectedData) toServerLogEntries() []*binlogpb.GrpcLogEntry { var ( - ret []*pb.GrpcLogEntry + ret []*binlogpb.GrpcLogEntry idInRPC uint64 = 1 ) ret = append(ret, ed.newClientHeaderEntry(false, globalRPCID, idInRPC)) @@ -794,8 +792,8 @@ func (ed *expectedData) toServerLogEntries() []*pb.GrpcLogEntry { return ret } -func runRPCs(t *testing.T, tc *testConfig, cc *rpcConfig) *expectedData { - te := newTest(t, tc) +func runRPCs(t *testing.T, cc *rpcConfig) *expectedData { + te := newTest(t) te.startServer(&testServer{te: te}) defer te.tearDown() @@ -840,7 +838,7 @@ func runRPCs(t *testing.T, tc *testConfig, cc *rpcConfig) *expectedData { // // This function is typically called with only two entries. It's written in this // way so the code can be put in a for loop instead of copied twice. 
-func equalLogEntry(entries ...*pb.GrpcLogEntry) (equal bool) { +func equalLogEntry(entries ...*binlogpb.GrpcLogEntry) (equal bool) { for i, e := range entries { // Clear out some fields we don't compare. e.Timestamp = nil @@ -869,9 +867,9 @@ func equalLogEntry(entries ...*pb.GrpcLogEntry) (equal bool) { func testClientBinaryLog(t *testing.T, c *rpcConfig) error { defer testSink.clear() - expect := runRPCs(t, &testConfig{}, c) + expect := runRPCs(t, c) want := expect.toClientLogEntries() - var got []*pb.GrpcLogEntry + var got []*binlogpb.GrpcLogEntry // In racy cases, some entries are not logged when the RPC is finished (e.g. // context.Cancel). // @@ -969,9 +967,9 @@ func (s) TestClientBinaryLogCancel(t *testing.T) { func testServerBinaryLog(t *testing.T, c *rpcConfig) error { defer testSink.clear() - expect := runRPCs(t, &testConfig{}, c) + expect := runRPCs(t, c) want := expect.toServerLogEntries() - var got []*pb.GrpcLogEntry + var got []*binlogpb.GrpcLogEntry // In racy cases, some entries are not logged when the RPC is finished (e.g. // context.Cancel). This is unlikely to happen on server side, but it does // no harm to retry. diff --git a/binarylog/grpc_binarylog_v1/binarylog.pb.go b/binarylog/grpc_binarylog_v1/binarylog.pb.go index ed75290cdf34..ec2c2fa14dd3 100644 --- a/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. @@ -261,6 +256,7 @@ type GrpcLogEntry struct { // according to the type of the log entry. // // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -694,12 +690,12 @@ func (x *Message) GetData() []byte { // Header keys added by gRPC are omitted. 
To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. // Practically speaking it will only be visible on server side because diff --git a/binarylog/sink.go b/binarylog/sink.go index db79346a2917..d924e4c91867 100644 --- a/binarylog/sink.go +++ b/binarylog/sink.go @@ -24,9 +24,9 @@ package binarylog import ( "fmt" - "io/ioutil" + "os" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" iblog "google.golang.org/grpc/internal/binarylog" ) @@ -48,7 +48,7 @@ type Sink interface { // entry. Some options are: proto bytes, or proto json. // // Note this function needs to be thread-safe. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close closes this sink and cleans up resources (e.g. the flushing // goroutine). Close() error @@ -60,7 +60,7 @@ func NewTempFileSink() (Sink, error) { // Two other options to replace this function: // 1. take filename as input. // 2. export NewBufferedSink(). 
- tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt") + tempFile, err := os.CreateTemp("/tmp", "grpcgo_binarylog_*.txt") if err != nil { return nil, fmt.Errorf("failed to create temp file: %v", err) } diff --git a/call.go b/call.go index 9e20e4d385f9..e6a1dc5d75ed 100644 --- a/call.go +++ b/call.go @@ -27,6 +27,11 @@ import ( // // All errors returned by Invoke are compatible with the status package. func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) diff --git a/call_test.go b/call_test.go deleted file mode 100644 index abc4537ddb7d..000000000000 --- a/call_test.go +++ /dev/null @@ -1,501 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "context" - "fmt" - "io" - "math" - "net" - "strconv" - "strings" - "sync" - "testing" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/transport" - "google.golang.org/grpc/status" -) - -var ( - expectedRequest = "ping" - expectedResponse = "pong" - weirdError = "format verbs: %v%s" - sizeLargeErr = 1024 * 1024 - canceled = 0 -) - -const defaultTestTimeout = 10 * time.Second - -type testCodec struct { -} - -func (testCodec) Marshal(v interface{}) ([]byte, error) { - return []byte(*(v.(*string))), nil -} - -func (testCodec) Unmarshal(data []byte, v interface{}) error { - *(v.(*string)) = string(data) - return nil -} - -func (testCodec) String() string { - return "test" -} - -type testStreamHandler struct { - port string - t transport.ServerTransport -} - -func (h *testStreamHandler) handleStream(t *testing.T, s *transport.Stream) { - p := &parser{r: s} - for { - pf, req, err := p.recvMsg(math.MaxInt32) - if err == io.EOF { - break - } - if err != nil { - return - } - if pf != compressionNone { - t.Errorf("Received the mistaken message format %d, want %d", pf, compressionNone) - return - } - var v string - codec := testCodec{} - if err := codec.Unmarshal(req, &v); err != nil { - t.Errorf("Failed to unmarshal the received message: %v", err) - return - } - if v == "weird error" { - h.t.WriteStatus(s, status.New(codes.Internal, weirdError)) - return - } - if v == "canceled" { - canceled++ - h.t.WriteStatus(s, status.New(codes.Internal, "")) - return - } - if v == "port" { - h.t.WriteStatus(s, status.New(codes.Internal, h.port)) - return - } - - if v != expectedRequest { - h.t.WriteStatus(s, status.New(codes.Internal, strings.Repeat("A", sizeLargeErr))) - return - } - } - // send a response back to end the stream. 
- data, err := encode(testCodec{}, &expectedResponse) - if err != nil { - t.Errorf("Failed to encode the response: %v", err) - return - } - hdr, payload := msgHeader(data, nil) - h.t.Write(s, hdr, payload, &transport.Options{}) - h.t.WriteStatus(s, status.New(codes.OK, "")) -} - -type server struct { - lis net.Listener - port string - addr string - startedErr chan error // sent nil or an error after server starts - mu sync.Mutex - conns map[transport.ServerTransport]bool -} - -type ctxKey string - -func newTestServer() *server { - return &server{startedErr: make(chan error, 1)} -} - -// start starts server. Other goroutines should block on s.startedErr for further operations. -func (s *server) start(t *testing.T, port int, maxStreams uint32) { - var err error - if port == 0 { - s.lis, err = net.Listen("tcp", "localhost:0") - } else { - s.lis, err = net.Listen("tcp", "localhost:"+strconv.Itoa(port)) - } - if err != nil { - s.startedErr <- fmt.Errorf("failed to listen: %v", err) - return - } - s.addr = s.lis.Addr().String() - _, p, err := net.SplitHostPort(s.addr) - if err != nil { - s.startedErr <- fmt.Errorf("failed to parse listener address: %v", err) - return - } - s.port = p - s.conns = make(map[transport.ServerTransport]bool) - s.startedErr <- nil - for { - conn, err := s.lis.Accept() - if err != nil { - return - } - config := &transport.ServerConfig{ - MaxStreams: maxStreams, - } - st, err := transport.NewServerTransport("http2", conn, config) - if err != nil { - continue - } - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - st.Close() - return - } - s.conns[st] = true - s.mu.Unlock() - h := &testStreamHandler{ - port: s.port, - t: st, - } - go st.HandleStreams(func(s *transport.Stream) { - go h.handleStream(t, s) - }, func(ctx context.Context, method string) context.Context { - return ctx - }) - } -} - -func (s *server) wait(t *testing.T, timeout time.Duration) { - select { - case err := <-s.startedErr: - if err != nil { - t.Fatal(err) - } - case 
<-time.After(timeout): - t.Fatalf("Timed out after %v waiting for server to be ready", timeout) - } -} - -func (s *server) stop() { - s.lis.Close() - s.mu.Lock() - for c := range s.conns { - c.Close() - } - s.conns = nil - s.mu.Unlock() -} - -func setUp(t *testing.T, port int, maxStreams uint32) (*server, *ClientConn) { - return setUpWithOptions(t, port, maxStreams) -} - -func setUpWithOptions(t *testing.T, port int, maxStreams uint32, dopts ...DialOption) (*server, *ClientConn) { - server := newTestServer() - go server.start(t, port, maxStreams) - server.wait(t, 2*time.Second) - addr := "localhost:" + server.port - dopts = append(dopts, WithBlock(), WithInsecure(), WithCodec(testCodec{})) - cc, err := Dial(addr, dopts...) - if err != nil { - t.Fatalf("Failed to create ClientConn: %v", err) - } - return server, cc -} - -func (s) TestUnaryClientInterceptor(t *testing.T) { - parentKey := ctxKey("parentKey") - - interceptor := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("interceptor should have %v in context", parentKey) - } - return invoker(ctx, method, req, reply, cc, opts...) 
- } - - server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithUnaryInterceptor(interceptor)) - defer func() { - cc.Close() - server.stop() - }() - - var reply string - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) - if err := cc.Invoke(parentCtx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { - t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) - } -} - -func (s) TestChainUnaryClientInterceptor(t *testing.T) { - var ( - parentKey = ctxKey("parentKey") - firstIntKey = ctxKey("firstIntKey") - secondIntKey = ctxKey("secondIntKey") - ) - - firstInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("first interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) != nil { - t.Fatalf("first interceptor should not have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) != nil { - t.Fatalf("first interceptor should not have %v in context", secondIntKey) - } - firstCtx := context.WithValue(ctx, firstIntKey, 1) - err := invoker(firstCtx, method, req, reply, cc, opts...) - *(reply.(*string)) += "1" - return err - } - - secondInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("second interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) == nil { - t.Fatalf("second interceptor should have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) != nil { - t.Fatalf("second interceptor should not have %v in context", secondIntKey) - } - secondCtx := context.WithValue(ctx, secondIntKey, 2) - err := invoker(secondCtx, method, req, reply, cc, opts...) 
- *(reply.(*string)) += "2" - return err - } - - lastInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("last interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) == nil { - t.Fatalf("last interceptor should have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) == nil { - t.Fatalf("last interceptor should have %v in context", secondIntKey) - } - err := invoker(ctx, method, req, reply, cc, opts...) - *(reply.(*string)) += "3" - return err - } - - server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithChainUnaryInterceptor(firstInt, secondInt, lastInt)) - defer func() { - cc.Close() - server.stop() - }() - - var reply string - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) - if err := cc.Invoke(parentCtx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse+"321" { - t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) - } -} - -func (s) TestChainOnBaseUnaryClientInterceptor(t *testing.T) { - var ( - parentKey = ctxKey("parentKey") - baseIntKey = ctxKey("baseIntKey") - ) - - baseInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("base interceptor should have %v in context", parentKey) - } - if ctx.Value(baseIntKey) != nil { - t.Fatalf("base interceptor should not have %v in context", baseIntKey) - } - baseCtx := context.WithValue(ctx, baseIntKey, 1) - return invoker(baseCtx, method, req, reply, cc, opts...) 
- } - - chainInt := func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { - if ctx.Value(parentKey) == nil { - t.Fatalf("chain interceptor should have %v in context", parentKey) - } - if ctx.Value(baseIntKey) == nil { - t.Fatalf("chain interceptor should have %v in context", baseIntKey) - } - return invoker(ctx, method, req, reply, cc, opts...) - } - - server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithUnaryInterceptor(baseInt), WithChainUnaryInterceptor(chainInt)) - defer func() { - cc.Close() - server.stop() - }() - - var reply string - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) - if err := cc.Invoke(parentCtx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { - t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) - } -} - -func (s) TestChainStreamClientInterceptor(t *testing.T) { - var ( - parentKey = ctxKey("parentKey") - firstIntKey = ctxKey("firstIntKey") - secondIntKey = ctxKey("secondIntKey") - ) - - firstInt := func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { - if ctx.Value(parentKey) == nil { - t.Fatalf("first interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) != nil { - t.Fatalf("first interceptor should not have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) != nil { - t.Fatalf("first interceptor should not have %v in context", secondIntKey) - } - firstCtx := context.WithValue(ctx, firstIntKey, 1) - return streamer(firstCtx, desc, cc, method, opts...) 
- } - - secondInt := func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { - if ctx.Value(parentKey) == nil { - t.Fatalf("second interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) == nil { - t.Fatalf("second interceptor should have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) != nil { - t.Fatalf("second interceptor should not have %v in context", secondIntKey) - } - secondCtx := context.WithValue(ctx, secondIntKey, 2) - return streamer(secondCtx, desc, cc, method, opts...) - } - - lastInt := func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { - if ctx.Value(parentKey) == nil { - t.Fatalf("last interceptor should have %v in context", parentKey) - } - if ctx.Value(firstIntKey) == nil { - t.Fatalf("last interceptor should have %v in context", firstIntKey) - } - if ctx.Value(secondIntKey) == nil { - t.Fatalf("last interceptor should have %v in context", secondIntKey) - } - return streamer(ctx, desc, cc, method, opts...) 
- } - - server, cc := setUpWithOptions(t, 0, math.MaxUint32, WithChainStreamInterceptor(firstInt, secondInt, lastInt)) - defer func() { - cc.Close() - server.stop() - }() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - parentCtx := context.WithValue(ctx, ctxKey("parentKey"), 0) - _, err := cc.NewStream(parentCtx, &StreamDesc{}, "/foo/bar") - if err != nil { - t.Fatalf("grpc.NewStream(_, _, _) = %v, want ", err) - } -} - -func (s) TestInvoke(t *testing.T) { - server, cc := setUp(t, 0, math.MaxUint32) - var reply string - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := cc.Invoke(ctx, "/foo/bar", &expectedRequest, &reply); err != nil || reply != expectedResponse { - t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want ", err) - } - cc.Close() - server.stop() -} - -func (s) TestInvokeLargeErr(t *testing.T) { - server, cc := setUp(t, 0, math.MaxUint32) - var reply string - req := "hello" - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - err := cc.Invoke(ctx, "/foo/bar", &req, &reply) - if _, ok := status.FromError(err); !ok { - t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.") - } - if status.Code(err) != codes.Internal || len(errorDesc(err)) != sizeLargeErr { - t.Fatalf("grpc.Invoke(_, _, _, _, _) = %v, want an error of code %d and desc size %d", err, codes.Internal, sizeLargeErr) - } - cc.Close() - server.stop() -} - -// TestInvokeErrorSpecialChars checks that error messages don't get mangled. 
-func (s) TestInvokeErrorSpecialChars(t *testing.T) { - server, cc := setUp(t, 0, math.MaxUint32) - var reply string - req := "weird error" - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - err := cc.Invoke(ctx, "/foo/bar", &req, &reply) - if _, ok := status.FromError(err); !ok { - t.Fatalf("grpc.Invoke(_, _, _, _, _) receives non rpc error.") - } - if got, want := errorDesc(err), weirdError; got != want { - t.Fatalf("grpc.Invoke(_, _, _, _, _) error = %q, want %q", got, want) - } - cc.Close() - server.stop() -} - -// TestInvokeCancel checks that an Invoke with a canceled context is not sent. -func (s) TestInvokeCancel(t *testing.T) { - server, cc := setUp(t, 0, math.MaxUint32) - var reply string - req := "canceled" - for i := 0; i < 100; i++ { - ctx, cancel := context.WithCancel(context.Background()) - cancel() - cc.Invoke(ctx, "/foo/bar", &req, &reply) - } - if canceled != 0 { - t.Fatalf("received %d of 100 canceled requests", canceled) - } - cc.Close() - server.stop() -} - -// TestInvokeCancelClosedNonFail checks that a canceled non-failfast RPC -// on a closed client will terminate. -func (s) TestInvokeCancelClosedNonFailFast(t *testing.T) { - server, cc := setUp(t, 0, math.MaxUint32) - var reply string - cc.Close() - req := "hello" - ctx, cancel := context.WithCancel(context.Background()) - cancel() - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply, WaitForReady(true)); err == nil { - t.Fatalf("canceled invoke on closed connection should fail") - } - server.stop() -} diff --git a/channelz/channelz.go b/channelz/channelz.go new file mode 100644 index 000000000000..32b7fa5794e1 --- /dev/null +++ b/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/channelz/grpc_channelz_v1/channelz.pb.go b/channelz/grpc_channelz_v1/channelz.pb.go index 416b3528d5c6..1c19e6b52c27 100644 --- a/channelz/grpc_channelz_v1/channelz.pb.go +++ b/channelz/grpc_channelz_v1/channelz.pb.go @@ -21,14 +21,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/channelz/v1/channelz.proto package grpc_channelz_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" anypb "google.golang.org/protobuf/types/known/anypb" @@ -46,10 +45,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type ChannelConnectivityState_State int32 const ( @@ -167,7 +162,7 @@ type Channel struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The identifier for this channel. This should bet set. + // The identifier for this channel. This should be set. Ref *ChannelRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` // Data specific to this channel. Data *ChannelData `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // At most one of 'channel_ref+subchannel_ref' and 'socket' is set. @@ -514,6 +509,7 @@ type ChannelTraceEvent struct { // created. // // Types that are assignable to ChildRef: + // // *ChannelTraceEvent_ChannelRef // *ChannelTraceEvent_SubchannelRef ChildRef isChannelTraceEvent_ChildRef `protobuf_oneof:"child_ref"` @@ -1338,6 +1334,7 @@ type Address struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Address: + // // *Address_TcpipAddress // *Address_UdsAddress_ // *Address_OtherAddress_ @@ -1433,6 +1430,7 @@ type Security struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Model: + // // *Security_Tls_ // *Security_Other Model isSecurity_Model `protobuf_oneof:"model"` @@ -2908,6 +2906,7 @@ type Security_Tls struct { unknownFields protoimpl.UnknownFields // Types that are assignable to CipherSuite: + // // *Security_Tls_StandardName // *Security_Tls_OtherName CipherSuite isSecurity_Tls_CipherSuite `protobuf_oneof:"cipher_suite"` diff --git a/channelz/grpc_channelz_v1/channelz_grpc.pb.go b/channelz/grpc_channelz_v1/channelz_grpc.pb.go index 051d1ac440c7..070f787ca527 100644 --- a/channelz/grpc_channelz_v1/channelz_grpc.pb.go +++ b/channelz/grpc_channelz_v1/channelz_grpc.pb.go @@ -1,4 +1,29 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines an interface for exporting monitoring information +// out of gRPC servers. See the full design at +// https://github.com/grpc/proposal/blob/master/A14-channelz.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/channelz/v1/channelz.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/channelz/v1/channelz.proto package grpc_channelz_v1 @@ -14,6 +39,16 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Channelz_GetTopChannels_FullMethodName = "/grpc.channelz.v1.Channelz/GetTopChannels" + Channelz_GetServers_FullMethodName = "/grpc.channelz.v1.Channelz/GetServers" + Channelz_GetServer_FullMethodName = "/grpc.channelz.v1.Channelz/GetServer" + Channelz_GetServerSockets_FullMethodName = "/grpc.channelz.v1.Channelz/GetServerSockets" + Channelz_GetChannel_FullMethodName = "/grpc.channelz.v1.Channelz/GetChannel" + Channelz_GetSubchannel_FullMethodName = "/grpc.channelz.v1.Channelz/GetSubchannel" + Channelz_GetSocket_FullMethodName = "/grpc.channelz.v1.Channelz/GetSocket" +) + // ChannelzClient is the client API for Channelz service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -45,7 +80,7 @@ func NewChannelzClient(cc grpc.ClientConnInterface) ChannelzClient { func (c *channelzClient) GetTopChannels(ctx context.Context, in *GetTopChannelsRequest, opts ...grpc.CallOption) (*GetTopChannelsResponse, error) { out := new(GetTopChannelsResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetTopChannels", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetTopChannels_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -54,7 +89,7 @@ func (c *channelzClient) GetTopChannels(ctx context.Context, in *GetTopChannelsR func (c *channelzClient) GetServers(ctx context.Context, in *GetServersRequest, opts ...grpc.CallOption) (*GetServersResponse, error) { out := new(GetServersResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetServers", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetServers_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -63,7 +98,7 @@ func (c *channelzClient) GetServers(ctx context.Context, in *GetServersRequest, func (c *channelzClient) GetServer(ctx context.Context, in *GetServerRequest, opts ...grpc.CallOption) (*GetServerResponse, error) { out := new(GetServerResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetServer", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetServer_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -72,7 +107,7 @@ func (c *channelzClient) GetServer(ctx context.Context, in *GetServerRequest, op func (c *channelzClient) GetServerSockets(ctx context.Context, in *GetServerSocketsRequest, opts ...grpc.CallOption) (*GetServerSocketsResponse, error) { out := new(GetServerSocketsResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetServerSockets", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetServerSockets_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -81,7 +116,7 @@ func (c *channelzClient) GetServerSockets(ctx context.Context, in *GetServerSock func (c *channelzClient) GetChannel(ctx context.Context, in *GetChannelRequest, opts ...grpc.CallOption) (*GetChannelResponse, error) { out := new(GetChannelResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetChannel", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetChannel_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -90,7 +125,7 @@ func (c *channelzClient) GetChannel(ctx context.Context, in *GetChannelRequest, func (c *channelzClient) GetSubchannel(ctx context.Context, in *GetSubchannelRequest, opts ...grpc.CallOption) (*GetSubchannelResponse, error) { out := new(GetSubchannelResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetSubchannel", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetSubchannel_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -99,7 +134,7 @@ func (c *channelzClient) GetSubchannel(ctx context.Context, in *GetSubchannelReq func (c *channelzClient) GetSocket(ctx context.Context, in *GetSocketRequest, opts ...grpc.CallOption) (*GetSocketResponse, error) { out := new(GetSocketResponse) - err := c.cc.Invoke(ctx, "/grpc.channelz.v1.Channelz/GetSocket", in, out, opts...) + err := c.cc.Invoke(ctx, Channelz_GetSocket_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -174,7 +209,7 @@ func _Channelz_GetTopChannels_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetTopChannels", + FullMethod: Channelz_GetTopChannels_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetTopChannels(ctx, req.(*GetTopChannelsRequest)) @@ -192,7 +227,7 @@ func _Channelz_GetServers_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetServers", + FullMethod: Channelz_GetServers_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetServers(ctx, req.(*GetServersRequest)) @@ -210,7 +245,7 @@ func _Channelz_GetServer_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetServer", + FullMethod: Channelz_GetServer_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetServer(ctx, req.(*GetServerRequest)) @@ -228,7 +263,7 @@ func _Channelz_GetServerSockets_Handler(srv interface{}, ctx context.Context, de } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetServerSockets", + FullMethod: Channelz_GetServerSockets_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetServerSockets(ctx, req.(*GetServerSocketsRequest)) @@ -246,7 +281,7 @@ func _Channelz_GetChannel_Handler(srv interface{}, ctx context.Context, dec func } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetChannel", + FullMethod: Channelz_GetChannel_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { return srv.(ChannelzServer).GetChannel(ctx, req.(*GetChannelRequest)) @@ -264,7 +299,7 @@ func _Channelz_GetSubchannel_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetSubchannel", + FullMethod: Channelz_GetSubchannel_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetSubchannel(ctx, req.(*GetSubchannelRequest)) @@ -282,7 +317,7 @@ func _Channelz_GetSocket_Handler(srv interface{}, ctx context.Context, dec func( } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.channelz.v1.Channelz/GetSocket", + FullMethod: Channelz_GetSocket_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ChannelzServer).GetSocket(ctx, req.(*GetSocketRequest)) diff --git a/channelz/service/func_linux.go b/channelz/service/func_linux.go index ce38a921b974..0873603c8520 100644 --- a/channelz/service/func_linux.go +++ b/channelz/service/func_linux.go @@ -25,6 +25,7 @@ import ( durpb "github.com/golang/protobuf/ptypes/duration" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/internal/channelz" + "google.golang.org/protobuf/types/known/anypb" ) func convertToPtypesDuration(sec int64, usec int64) *durpb.Duration { @@ -34,7 +35,7 @@ func convertToPtypesDuration(sec int64, usec int64) *durpb.Duration { func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOption { var opts []*channelzpb.SocketOption if skopts.Linger != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionLinger{ + additional, err := anypb.New(&channelzpb.SocketOptionLinger{ Active: skopts.Linger.Onoff != 0, Duration: convertToPtypesDuration(int64(skopts.Linger.Linger), 0), }) @@ -43,10 +44,12 @@ func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOptio Name: "SO_LINGER", 
Additional: additional, }) + } else { + logger.Warningf("Failed to marshal socket options linger %+v: %v", skopts.Linger, err) } } if skopts.RecvTimeout != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTimeout{ + additional, err := anypb.New(&channelzpb.SocketOptionTimeout{ Duration: convertToPtypesDuration(int64(skopts.RecvTimeout.Sec), int64(skopts.RecvTimeout.Usec)), }) if err == nil { @@ -54,10 +57,12 @@ func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOptio Name: "SO_RCVTIMEO", Additional: additional, }) + } else { + logger.Warningf("Failed to marshal socket options receive timeout %+v: %v", skopts.RecvTimeout, err) } } if skopts.SendTimeout != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTimeout{ + additional, err := anypb.New(&channelzpb.SocketOptionTimeout{ Duration: convertToPtypesDuration(int64(skopts.SendTimeout.Sec), int64(skopts.SendTimeout.Usec)), }) if err == nil { @@ -65,10 +70,12 @@ func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOptio Name: "SO_SNDTIMEO", Additional: additional, }) + } else { + logger.Warningf("Failed to marshal socket options send timeout %+v: %v", skopts.SendTimeout, err) } } if skopts.TCPInfo != nil { - additional, err := ptypes.MarshalAny(&channelzpb.SocketOptionTcpInfo{ + additional, err := anypb.New(&channelzpb.SocketOptionTcpInfo{ TcpiState: uint32(skopts.TCPInfo.State), TcpiCaState: uint32(skopts.TCPInfo.Ca_state), TcpiRetransmits: uint32(skopts.TCPInfo.Retransmits), @@ -104,6 +111,8 @@ func sockoptToProto(skopts *channelz.SocketOptionData) []*channelzpb.SocketOptio Name: "TCP_INFO", Additional: additional, }) + } else { + logger.Warningf("Failed to marshal socket options TCP info %+v: %v", skopts.TCPInfo, err) } } return opts diff --git a/channelz/service/func_nonlinux.go b/channelz/service/func_nonlinux.go index eb53334ed0d1..473495d6655e 100644 --- a/channelz/service/func_nonlinux.go +++ 
b/channelz/service/func_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * diff --git a/channelz/service/service.go b/channelz/service/service.go index c60ab604e81b..ae19ed3792ea 100644 --- a/channelz/service/service.go +++ b/channelz/service/service.go @@ -25,15 +25,18 @@ import ( "github.com/golang/protobuf/ptypes" wrpb "github.com/golang/protobuf/ptypes/wrappers" - "google.golang.org/grpc" channelzgrpc "google.golang.org/grpc/channelz/grpc_channelz_v1" channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" + + "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/status" + "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/types/known/anypb" ) func init() { @@ -43,6 +46,10 @@ func init() { var logger = grpclog.Component("channelz") // RegisterChannelzServiceToServer registers the channelz service to the given server. +// +// Note: it is preferred to use the admin API +// (https://pkg.go.dev/google.golang.org/grpc/admin#Register) instead to +// register Channelz and other administrative services. 
func RegisterChannelzServiceToServer(s grpc.ServiceRegistrar) { channelzgrpc.RegisterChannelzServer(s, newCZServer()) } @@ -78,7 +85,7 @@ func channelTraceToProto(ct *channelz.ChannelTrace) *channelzpb.ChannelTrace { if ts, err := ptypes.TimestampProto(ct.CreationTime); err == nil { pbt.CreationTimestamp = ts } - var events []*channelzpb.ChannelTraceEvent + events := make([]*channelzpb.ChannelTraceEvent, 0, len(ct.Events)) for _, e := range ct.Events { cte := &channelzpb.ChannelTraceEvent{ Description: e.Desc, @@ -183,7 +190,7 @@ func securityToProto(se credentials.ChannelzSecurityValue) *channelzpb.Security otherSecurity := &channelzpb.Security_OtherSecurity{ Name: v.Name, } - if anyval, err := ptypes.MarshalAny(v.Value); err == nil { + if anyval, err := anypb.New(protoadapt.MessageV2Of(v.Value)); err == nil { otherSecurity.Value = anyval } return &channelzpb.Security{Model: &channelzpb.Security_Other{Other: otherSecurity}} diff --git a/channelz/service/service_sktopt_test.go b/channelz/service/service_sktopt_test.go index ecd4a2ad05f9..8ec5341cb030 100644 --- a/channelz/service/service_sktopt_test.go +++ b/channelz/service/service_sktopt_test.go @@ -1,3 +1,4 @@ +//go:build linux && (386 || amd64) // +build linux // +build 386 amd64 @@ -27,15 +28,17 @@ package service import ( "context" - "reflect" "strconv" "testing" "github.com/golang/protobuf/ptypes" - durpb "github.com/golang/protobuf/ptypes/duration" + "github.com/google/go-cmp/cmp" "golang.org/x/sys/unix" - channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" "google.golang.org/grpc/internal/channelz" + "google.golang.org/protobuf/testing/protocmp" + + durpb "github.com/golang/protobuf/ptypes/duration" + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" ) func init() { @@ -125,7 +128,7 @@ func protoToSocketOption(skopts []*channelzpb.SocketOption) *channelz.SocketOpti } func (s) TestGetSocketOptions(t *testing.T) { - czCleanup := 
channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) ss := []*dummySocket{ { @@ -138,20 +141,27 @@ func (s) TestGetSocketOptions(t *testing.T) { }, } svr := newCZServer() - ids := make([]int64, len(ss)) + ids := make([]*channelz.Identifier, len(ss)) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) for i, s := range ss { - ids[i] = channelz.RegisterNormalSocket(s, svrID, strconv.Itoa(i)) + ids[i], _ = channelz.RegisterNormalSocket(s, svrID, strconv.Itoa(i)) defer channelz.RemoveEntry(ids[i]) } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() for i, s := range ss { - resp, _ := svr.GetSocket(ctx, &channelzpb.GetSocketRequest{SocketId: ids[i]}) - metrics := resp.GetSocket() - if !reflect.DeepEqual(metrics.GetRef(), &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}) || !reflect.DeepEqual(socketProtoToStruct(metrics), s) { - t.Fatalf("resp.GetSocket() want: metrics.GetRef() = %#v and %#v, got: metrics.GetRef() = %#v and %#v", &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}, s, metrics.GetRef(), socketProtoToStruct(metrics)) + resp, _ := svr.GetSocket(ctx, &channelzpb.GetSocketRequest{SocketId: ids[i].Int()}) + got, want := resp.GetSocket().GetRef(), &channelzpb.SocketRef{SocketId: ids[i].Int(), Name: strconv.Itoa(i)} + if !cmp.Equal(got, want, protocmp.Transform()) { + t.Fatalf("resp.GetSocket() returned metrics.GetRef() = %#v, want %#v", got, want) + } + socket, err := socketProtoToStruct(resp.GetSocket()) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(s, socket, protocmp.Transform(), cmp.AllowUnexported(dummySocket{})); diff != "" { + t.Fatalf("unexpected socket, diff (-want +got):\n%s", diff) } } } diff --git a/channelz/service/service_test.go b/channelz/service/service_test.go index 03d2b29c27b4..94ca6b8b35b7 100644 --- a/channelz/service/service_test.go +++ 
b/channelz/service/service_test.go @@ -22,18 +22,21 @@ import ( "context" "fmt" "net" - "reflect" "strconv" + "strings" "testing" "time" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" + "google.golang.org/protobuf/testing/protocmp" + + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" ) func init() { @@ -61,14 +64,6 @@ type protoToSocketOptFunc func([]*channelzpb.SocketOption) *channelz.SocketOptio // It is only defined under linux environment on x86 architecture. var protoToSocketOpt protoToSocketOptFunc -// emptyTime is used for detecting unset value of time.Time type. -// For go1.7 and earlier, ptypes.Timestamp will fill in the loc field of time.Time -// with &utcLoc. However zero value of a time.Time type value loc field is nil. -// This behavior will make reflect.DeepEqual fail upon unset time.Time field, -// and cause false positive fatal error. -// TODO: Go1.7 is no longer supported - does this need a change? 
-var emptyTime time.Time - const defaultTestTimeout = 10 * time.Second type dummyChannel struct { @@ -149,7 +144,7 @@ func (d *dummySocket) ChannelzMetric() *channelz.SocketInternalMetric { } } -func channelProtoToStruct(c *channelzpb.Channel) *dummyChannel { +func channelProtoToStruct(c *channelzpb.Channel) (*dummyChannel, error) { dc := &dummyChannel{} pdata := c.GetData() switch pdata.GetState().GetState() { @@ -170,29 +165,27 @@ func channelProtoToStruct(c *channelzpb.Channel) *dummyChannel { dc.callsStarted = pdata.CallsStarted dc.callsSucceeded = pdata.CallsSucceeded dc.callsFailed = pdata.CallsFailed - if t, err := ptypes.Timestamp(pdata.GetLastCallStartedTimestamp()); err == nil { - if !t.Equal(emptyTime) { - dc.lastCallStartedTimestamp = t - } + if err := pdata.GetLastCallStartedTimestamp().CheckValid(); err != nil { + return nil, err } - return dc + dc.lastCallStartedTimestamp = pdata.GetLastCallStartedTimestamp().AsTime() + return dc, nil } -func serverProtoToStruct(s *channelzpb.Server) *dummyServer { +func serverProtoToStruct(s *channelzpb.Server) (*dummyServer, error) { ds := &dummyServer{} pdata := s.GetData() ds.callsStarted = pdata.CallsStarted ds.callsSucceeded = pdata.CallsSucceeded ds.callsFailed = pdata.CallsFailed - if t, err := ptypes.Timestamp(pdata.GetLastCallStartedTimestamp()); err == nil { - if !t.Equal(emptyTime) { - ds.lastCallStartedTimestamp = t - } + if err := pdata.GetLastCallStartedTimestamp().CheckValid(); err != nil { + return nil, err } - return ds + ds.lastCallStartedTimestamp = pdata.GetLastCallStartedTimestamp().AsTime() + return ds, nil } -func socketProtoToStruct(s *channelzpb.Socket) *dummySocket { +func socketProtoToStruct(s *channelzpb.Socket) (*dummySocket, error) { ds := &dummySocket{} pdata := s.GetData() ds.streamsStarted = pdata.GetStreamsStarted() @@ -201,26 +194,22 @@ func socketProtoToStruct(s *channelzpb.Socket) *dummySocket { ds.messagesSent = pdata.GetMessagesSent() ds.messagesReceived = 
pdata.GetMessagesReceived() ds.keepAlivesSent = pdata.GetKeepAlivesSent() - if t, err := ptypes.Timestamp(pdata.GetLastLocalStreamCreatedTimestamp()); err == nil { - if !t.Equal(emptyTime) { - ds.lastLocalStreamCreatedTimestamp = t - } + if err := pdata.GetLastLocalStreamCreatedTimestamp().CheckValid(); err != nil { + return nil, err } - if t, err := ptypes.Timestamp(pdata.GetLastRemoteStreamCreatedTimestamp()); err == nil { - if !t.Equal(emptyTime) { - ds.lastRemoteStreamCreatedTimestamp = t - } + ds.lastLocalStreamCreatedTimestamp = pdata.GetLastLocalStreamCreatedTimestamp().AsTime() + if err := pdata.GetLastRemoteStreamCreatedTimestamp().CheckValid(); err != nil { + return nil, err } - if t, err := ptypes.Timestamp(pdata.GetLastMessageSentTimestamp()); err == nil { - if !t.Equal(emptyTime) { - ds.lastMessageSentTimestamp = t - } + ds.lastRemoteStreamCreatedTimestamp = pdata.GetLastRemoteStreamCreatedTimestamp().AsTime() + if err := pdata.GetLastMessageSentTimestamp().CheckValid(); err != nil { + return nil, err } - if t, err := ptypes.Timestamp(pdata.GetLastMessageReceivedTimestamp()); err == nil { - if !t.Equal(emptyTime) { - ds.lastMessageReceivedTimestamp = t - } + ds.lastMessageSentTimestamp = pdata.GetLastMessageSentTimestamp().AsTime() + if err := pdata.GetLastMessageReceivedTimestamp().CheckValid(); err != nil { + return nil, err } + ds.lastMessageReceivedTimestamp = pdata.GetLastMessageReceivedTimestamp().AsTime() if v := pdata.GetLocalFlowControlWindow(); v != nil { ds.localFlowControlWindow = v.Value } @@ -240,7 +229,7 @@ func socketProtoToStruct(s *channelzpb.Socket) *dummySocket { ds.remoteAddr = protoToAddr(remote) } ds.remoteName = s.GetRemoteName() - return ds + return ds, nil } func protoToSecurity(protoSecurity *channelzpb.Security) credentials.ChannelzSecurityValue { @@ -322,10 +311,10 @@ func (s) TestGetTopChannels(t *testing.T) { }, {}, } - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer 
cleanupWrapper(czCleanup, t) for _, c := range tcs { - id := channelz.RegisterChannel(c, 0, "") + id := channelz.RegisterChannel(c, nil, "") defer channelz.RemoveEntry(id) } s := newCZServer() @@ -336,12 +325,16 @@ func (s) TestGetTopChannels(t *testing.T) { t.Fatalf("resp.GetEnd() want true, got %v", resp.GetEnd()) } for i, c := range resp.GetChannel() { - if !reflect.DeepEqual(channelProtoToStruct(c), tcs[i]) { - t.Fatalf("dummyChannel: %d, want: %#v, got: %#v", i, tcs[i], channelProtoToStruct(c)) + channel, err := channelProtoToStruct(c) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(tcs[i], channel, protocmp.Transform(), cmp.AllowUnexported(dummyChannel{})); diff != "" { + t.Fatalf("unexpected channel, diff (-want +got):\n%s", diff) } } for i := 0; i < 50; i++ { - id := channelz.RegisterChannel(tcs[0], 0, "") + id := channelz.RegisterChannel(tcs[0], nil, "") defer channelz.RemoveEntry(id) } resp, _ = s.GetTopChannels(ctx, &channelzpb.GetTopChannelsRequest{StartChannelId: 0}) @@ -371,7 +364,7 @@ func (s) TestGetServers(t *testing.T) { lastCallStartedTimestamp: time.Now().UTC(), }, } - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) for _, s := range ss { id := channelz.RegisterServer(s, "") @@ -385,8 +378,12 @@ func (s) TestGetServers(t *testing.T) { t.Fatalf("resp.GetEnd() want true, got %v", resp.GetEnd()) } for i, s := range resp.GetServer() { - if !reflect.DeepEqual(serverProtoToStruct(s), ss[i]) { - t.Fatalf("dummyServer: %d, want: %#v, got: %#v", i, ss[i], serverProtoToStruct(s)) + server, err := serverProtoToStruct(s) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(ss[i], server, protocmp.Transform(), cmp.AllowUnexported(dummyServer{})); diff != "" { + t.Fatalf("unexpected server, diff (-want +got):\n%s", diff) } } for i := 0; i < 50; i++ { @@ -400,39 +397,39 @@ func (s) TestGetServers(t *testing.T) { } func (s) TestGetServerSockets(t *testing.T) { 
- czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) refNames := []string{"listen socket 1", "normal socket 1", "normal socket 2"} - ids := make([]int64, 3) - ids[0] = channelz.RegisterListenSocket(&dummySocket{}, svrID, refNames[0]) - ids[1] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[1]) - ids[2] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[2]) + ids := make([]*channelz.Identifier, 3) + ids[0], _ = channelz.RegisterListenSocket(&dummySocket{}, svrID, refNames[0]) + ids[1], _ = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[1]) + ids[2], _ = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[2]) for _, id := range ids { defer channelz.RemoveEntry(id) } svr := newCZServer() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - resp, _ := svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID, StartSocketId: 0}) + resp, _ := svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID.Int(), StartSocketId: 0}) if !resp.GetEnd() { t.Fatalf("resp.GetEnd() want: true, got: %v", resp.GetEnd()) } // GetServerSockets only return normal sockets. 
want := map[int64]string{ - ids[1]: refNames[1], - ids[2]: refNames[2], + ids[1].Int(): refNames[1], + ids[2].Int(): refNames[2], } - if !reflect.DeepEqual(convertSocketRefSliceToMap(resp.GetSocketRef()), want) { + if !cmp.Equal(convertSocketRefSliceToMap(resp.GetSocketRef()), want) { t.Fatalf("GetServerSockets want: %#v, got: %#v", want, resp.GetSocketRef()) } for i := 0; i < 50; i++ { - id := channelz.RegisterNormalSocket(&dummySocket{}, svrID, "") + id, _ := channelz.RegisterNormalSocket(&dummySocket{}, svrID, "") defer channelz.RemoveEntry(id) } - resp, _ = svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID, StartSocketId: 0}) + resp, _ = svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID.Int(), StartSocketId: 0}) if resp.GetEnd() { t.Fatalf("resp.GetEnd() want false, got %v", resp.GetEnd()) } @@ -441,15 +438,15 @@ func (s) TestGetServerSockets(t *testing.T) { // This test makes a GetServerSockets with a non-zero start ID, and expect only // sockets with ID >= the given start ID. 
func (s) TestGetServerSocketsNonZeroStartID(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) refNames := []string{"listen socket 1", "normal socket 1", "normal socket 2"} - ids := make([]int64, 3) - ids[0] = channelz.RegisterListenSocket(&dummySocket{}, svrID, refNames[0]) - ids[1] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[1]) - ids[2] = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[2]) + ids := make([]*channelz.Identifier, 3) + ids[0], _ = channelz.RegisterListenSocket(&dummySocket{}, svrID, refNames[0]) + ids[1], _ = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[1]) + ids[2], _ = channelz.RegisterNormalSocket(&dummySocket{}, svrID, refNames[2]) for _, id := range ids { defer channelz.RemoveEntry(id) } @@ -458,55 +455,62 @@ func (s) TestGetServerSocketsNonZeroStartID(t *testing.T) { defer cancel() // Make GetServerSockets with startID = ids[1]+1, so socket-1 won't be // included in the response. - resp, _ := svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID, StartSocketId: ids[1] + 1}) + resp, _ := svr.GetServerSockets(ctx, &channelzpb.GetServerSocketsRequest{ServerId: svrID.Int(), StartSocketId: ids[1].Int() + 1}) if !resp.GetEnd() { t.Fatalf("resp.GetEnd() want: true, got: %v", resp.GetEnd()) } // GetServerSockets only return normal socket-2, socket-1 should be // filtered by start ID. 
want := map[int64]string{ - ids[2]: refNames[2], + ids[2].Int(): refNames[2], } - if !reflect.DeepEqual(convertSocketRefSliceToMap(resp.GetSocketRef()), want) { + if !cmp.Equal(convertSocketRefSliceToMap(resp.GetSocketRef()), want) { t.Fatalf("GetServerSockets want: %#v, got: %#v", want, resp.GetSocketRef()) } } func (s) TestGetChannel(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) + refNames := []string{"top channel 1", "nested channel 1", "sub channel 2", "nested channel 3"} - ids := make([]int64, 4) - ids[0] = channelz.RegisterChannel(&dummyChannel{}, 0, refNames[0]) + ids := make([]*channelz.Identifier, 4) + ids[0] = channelz.RegisterChannel(&dummyChannel{}, nil, refNames[0]) channelz.AddTraceEvent(logger, ids[0], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtInfo, }) + ids[1] = channelz.RegisterChannel(&dummyChannel{}, ids[0], refNames[1]) channelz.AddTraceEvent(logger, ids[1], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtInfo, Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[1]), + Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[1].Int()), Severity: channelz.CtInfo, }, }) - ids[2] = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[2]) + var err error + ids[2], err = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[2]) + if err != nil { + t.Fatalf("channelz.RegisterSubChannel() failed: %v", err) + } channelz.AddTraceEvent(logger, ids[2], 0, &channelz.TraceEventDesc{ Desc: "SubChannel Created", Severity: channelz.CtInfo, Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("SubChannel(id:%d) created", ids[2]), + Desc: fmt.Sprintf("SubChannel(id:%d) created", ids[2].Int()), Severity: channelz.CtInfo, }, }) + ids[3] = channelz.RegisterChannel(&dummyChannel{}, ids[1], refNames[3]) channelz.AddTraceEvent(logger, ids[3], 0, 
&channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtInfo, Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[3]), + Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[3].Int()), Severity: channelz.CtInfo, }, }) @@ -518,21 +522,23 @@ func (s) TestGetChannel(t *testing.T) { Desc: "Resolver returns an empty address list", Severity: channelz.CtWarning, }) + for _, id := range ids { defer channelz.RemoveEntry(id) } + svr := newCZServer() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - resp, _ := svr.GetChannel(ctx, &channelzpb.GetChannelRequest{ChannelId: ids[0]}) + resp, _ := svr.GetChannel(ctx, &channelzpb.GetChannelRequest{ChannelId: ids[0].Int()}) metrics := resp.GetChannel() subChans := metrics.GetSubchannelRef() - if len(subChans) != 1 || subChans[0].GetName() != refNames[2] || subChans[0].GetSubchannelId() != ids[2] { - t.Fatalf("metrics.GetSubChannelRef() want %#v, got %#v", []*channelzpb.SubchannelRef{{SubchannelId: ids[2], Name: refNames[2]}}, subChans) + if len(subChans) != 1 || subChans[0].GetName() != refNames[2] || subChans[0].GetSubchannelId() != ids[2].Int() { + t.Fatalf("metrics.GetSubChannelRef() want %#v, got %#v", []*channelzpb.SubchannelRef{{SubchannelId: ids[2].Int(), Name: refNames[2]}}, subChans) } nestedChans := metrics.GetChannelRef() - if len(nestedChans) != 1 || nestedChans[0].GetName() != refNames[1] || nestedChans[0].GetChannelId() != ids[1] { - t.Fatalf("metrics.GetChannelRef() want %#v, got %#v", []*channelzpb.ChannelRef{{ChannelId: ids[1], Name: refNames[1]}}, nestedChans) + if len(nestedChans) != 1 || nestedChans[0].GetName() != refNames[1] || nestedChans[0].GetChannelId() != ids[1].Int() { + t.Fatalf("metrics.GetChannelRef() want %#v, got %#v", []*channelzpb.ChannelRef{{ChannelId: ids[1].Int(), Name: refNames[1]}}, nestedChans) } trace := metrics.GetData().GetTrace() want := []struct { @@ -542,14 +548,14 @@ func (s) 
TestGetChannel(t *testing.T) { childRef string }{ {desc: "Channel Created", severity: channelzpb.ChannelTraceEvent_CT_INFO}, - {desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[1]), severity: channelzpb.ChannelTraceEvent_CT_INFO, childID: ids[1], childRef: refNames[1]}, - {desc: fmt.Sprintf("SubChannel(id:%d) created", ids[2]), severity: channelzpb.ChannelTraceEvent_CT_INFO, childID: ids[2], childRef: refNames[2]}, + {desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[1].Int()), severity: channelzpb.ChannelTraceEvent_CT_INFO, childID: ids[1].Int(), childRef: refNames[1]}, + {desc: fmt.Sprintf("SubChannel(id:%d) created", ids[2].Int()), severity: channelzpb.ChannelTraceEvent_CT_INFO, childID: ids[2].Int(), childRef: refNames[2]}, {desc: fmt.Sprintf("Channel Connectivity change to %v", connectivity.Ready), severity: channelzpb.ChannelTraceEvent_CT_INFO}, {desc: "Resolver returns an empty address list", severity: channelzpb.ChannelTraceEvent_CT_WARNING}, } for i, e := range trace.Events { - if e.GetDescription() != want[i].desc { + if !strings.Contains(e.GetDescription(), want[i].desc) { t.Fatalf("trace: GetDescription want %#v, got %#v", want[i].desc, e.GetDescription()) } if e.GetSeverity() != want[i].severity { @@ -564,11 +570,11 @@ func (s) TestGetChannel(t *testing.T) { } } } - resp, _ = svr.GetChannel(ctx, &channelzpb.GetChannelRequest{ChannelId: ids[1]}) + resp, _ = svr.GetChannel(ctx, &channelzpb.GetChannelRequest{ChannelId: ids[1].Int()}) metrics = resp.GetChannel() nestedChans = metrics.GetChannelRef() - if len(nestedChans) != 1 || nestedChans[0].GetName() != refNames[3] || nestedChans[0].GetChannelId() != ids[3] { - t.Fatalf("metrics.GetChannelRef() want %#v, got %#v", []*channelzpb.ChannelRef{{ChannelId: ids[3], Name: refNames[3]}}, nestedChans) + if len(nestedChans) != 1 || nestedChans[0].GetName() != refNames[3] || nestedChans[0].GetChannelId() != ids[3].Int() { + t.Fatalf("metrics.GetChannelRef() want %#v, got %#v", 
[]*channelzpb.ChannelRef{{ChannelId: ids[3].Int(), Name: refNames[3]}}, nestedChans) } } @@ -578,26 +584,30 @@ func (s) TestGetSubChannel(t *testing.T) { subchanConnectivityChange = fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Ready) subChanPickNewAddress = fmt.Sprintf("Subchannel picks a new address %q to connect", "0.0.0.0") ) - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) refNames := []string{"top channel 1", "sub channel 1", "socket 1", "socket 2"} - ids := make([]int64, 4) - ids[0] = channelz.RegisterChannel(&dummyChannel{}, 0, refNames[0]) + ids := make([]*channelz.Identifier, 4) + ids[0] = channelz.RegisterChannel(&dummyChannel{}, nil, refNames[0]) channelz.AddTraceEvent(logger, ids[0], 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtInfo, }) - ids[1] = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[1]) + var err error + ids[1], err = channelz.RegisterSubChannel(&dummyChannel{}, ids[0], refNames[1]) + if err != nil { + t.Fatalf("channelz.RegisterSubChannel() failed: %v", err) + } channelz.AddTraceEvent(logger, ids[1], 0, &channelz.TraceEventDesc{ Desc: subchanCreated, Severity: channelz.CtInfo, Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[0]), + Desc: fmt.Sprintf("Nested Channel(id:%d) created", ids[0].Int()), Severity: channelz.CtInfo, }, }) - ids[2] = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[2]) - ids[3] = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[3]) + ids[2], _ = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[2]) + ids[3], _ = channelz.RegisterNormalSocket(&dummySocket{}, ids[1], refNames[3]) channelz.AddTraceEvent(logger, ids[1], 0, &channelz.TraceEventDesc{ Desc: subchanConnectivityChange, Severity: channelz.CtInfo, @@ -612,13 +622,13 @@ func (s) TestGetSubChannel(t *testing.T) { svr := newCZServer() 
ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - resp, _ := svr.GetSubchannel(ctx, &channelzpb.GetSubchannelRequest{SubchannelId: ids[1]}) + resp, _ := svr.GetSubchannel(ctx, &channelzpb.GetSubchannelRequest{SubchannelId: ids[1].Int()}) metrics := resp.GetSubchannel() want := map[int64]string{ - ids[2]: refNames[2], - ids[3]: refNames[3], + ids[2].Int(): refNames[2], + ids[3].Int(): refNames[3], } - if !reflect.DeepEqual(convertSocketRefSliceToMap(metrics.GetSocketRef()), want) { + if !cmp.Equal(convertSocketRefSliceToMap(metrics.GetSocketRef()), want) { t.Fatalf("metrics.GetSocketRef() want %#v: got: %#v", want, metrics.GetSocketRef()) } @@ -652,7 +662,7 @@ func (s) TestGetSubChannel(t *testing.T) { } func (s) TestGetSocket(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer cleanupWrapper(czCleanup, t) ss := []*dummySocket{ { @@ -726,20 +736,27 @@ func (s) TestGetSocket(t *testing.T) { }, } svr := newCZServer() - ids := make([]int64, len(ss)) + ids := make([]*channelz.Identifier, len(ss)) svrID := channelz.RegisterServer(&dummyServer{}, "") defer channelz.RemoveEntry(svrID) for i, s := range ss { - ids[i] = channelz.RegisterNormalSocket(s, svrID, strconv.Itoa(i)) + ids[i], _ = channelz.RegisterNormalSocket(s, svrID, strconv.Itoa(i)) defer channelz.RemoveEntry(ids[i]) } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() for i, s := range ss { - resp, _ := svr.GetSocket(ctx, &channelzpb.GetSocketRequest{SocketId: ids[i]}) - metrics := resp.GetSocket() - if !reflect.DeepEqual(metrics.GetRef(), &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}) || !reflect.DeepEqual(socketProtoToStruct(metrics), s) { - t.Fatalf("resp.GetSocket() want: metrics.GetRef() = %#v and %#v, got: metrics.GetRef() = %#v and %#v", &channelzpb.SocketRef{SocketId: ids[i], Name: strconv.Itoa(i)}, s, metrics.GetRef(), 
socketProtoToStruct(metrics)) + resp, _ := svr.GetSocket(ctx, &channelzpb.GetSocketRequest{SocketId: ids[i].Int()}) + got, want := resp.GetSocket().GetRef(), &channelzpb.SocketRef{SocketId: ids[i].Int(), Name: strconv.Itoa(i)} + if !cmp.Equal(got, want, protocmp.Transform()) { + t.Fatalf("resp.GetSocket() returned metrics.GetRef() = %#v, want %#v", got, want) + } + socket, err := socketProtoToStruct(resp.GetSocket()) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(s, socket, protocmp.Transform(), cmp.AllowUnexported(dummySocket{})); diff != "" { + t.Fatalf("unexpected socket, diff (-want +got):\n%s", diff) } } } diff --git a/channelz/service/util_sktopt_386_test.go b/channelz/service/util_sktopt_386_test.go index d9c981271361..3ba3dc96e7c6 100644 --- a/channelz/service/util_sktopt_386_test.go +++ b/channelz/service/util_sktopt_386_test.go @@ -1,3 +1,4 @@ +//go:build 386 && linux // +build 386,linux /* diff --git a/channelz/service/util_sktopt_amd64_test.go b/channelz/service/util_sktopt_amd64_test.go index 0ff06d128330..124d7b758199 100644 --- a/channelz/service/util_sktopt_amd64_test.go +++ b/channelz/service/util_sktopt_amd64_test.go @@ -1,3 +1,4 @@ +//go:build amd64 && linux // +build amd64,linux /* diff --git a/clientconn.go b/clientconn.go index 77a08fd33bf8..bfd7555a8bf2 100644 --- a/clientconn.go +++ b/clientconn.go @@ -23,7 +23,7 @@ import ( "errors" "fmt" "math" - "reflect" + "net/url" "strings" "sync" "sync/atomic" @@ -37,7 +37,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -69,6 +69,9 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. 
errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates the the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -79,17 +82,17 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., OAuth2 token) which requires secure connection on an insecure - // connection. + // errNoTransportCredsInBundle indicated that the configured creds bundle + // returned a transport credentials which was nil. + errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials") + // errTransportCredentialsMissing indicates that users want to transmit + // security information (e.g., OAuth2 token) which requires secure + // connection on an insecure connection. 
errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection. - errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") ) const ( @@ -134,16 +137,43 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), - } + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), + } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if at all we ever get to do that. 
+ cc.idlenessState = ccIdlenessStateIdle + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) + + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } + } for _, opt := range opts { opt.apply(&cc.dopts) @@ -158,40 +188,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") - } - cc.csMgr.channelzID = cc.channelzID - } + // Register ClientConn with channelz. 
+ cc.channelzRegistration(target) - if !cc.dopts.insecure { - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - } else { - if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { - return nil, errCredentialsConflict - } - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + if err := cc.validateTransportCredentials(); err != nil { + return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -229,58 +230,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - scSet := false - if cc.dopts.scChan != nil { - // Try to get an initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - scSet = true - } - default: - } - } if cc.dopts.bs == nil { cc.dopts.bs = backoff.DefaultExponential } // Determine the resolver to use. - cc.parsedTarget = grpcutil.ParseTarget(cc.target, cc.dopts.copts.Dialer != nil) - channelz.Infof(logger, cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) - resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. 
- channelz.Infof(logger, cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) - if resolverBuilder == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) - } + if err := cc.parseTargetAndFindResolver(); err != nil { + return nil, err } - - creds := cc.dopts.copts.TransportCredentials - if creds != nil && creds.Info().ServerName != "" { - cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.authority != "" { - cc.authority = cc.dopts.authority - } else if strings.HasPrefix(cc.target, "unix:") || strings.HasPrefix(cc.target, "unix-abstract:") { - cc.authority = "localhost" - } else if strings.HasPrefix(cc.parsedTarget.Endpoint, ":") { - cc.authority = "localhost" + cc.parsedTarget.Endpoint - } else { - // Use endpoint from "scheme://authority/endpoint" as the default - // authority for ClientConn. - cc.authority = cc.parsedTarget.Endpoint + if err = cc.determineAuthority(); err != nil { + return nil, err } - if cc.dopts.scChan != nil && !scSet { + if cc.dopts.scChan != nil { // Blocking wait for the initial service config. select { case sc, ok := <-cc.dopts.scChan: @@ -296,55 +258,224 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } + // This creates the name resolver, load balancer, blocking picker etc. + if err := cc.exitIdleMode(); err != nil { + return nil, err + } + + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + + // Return early for non-blocking dials. 
+ if !cc.dopts.block { + return cc, nil + } + + // A blocking dial blocks until the clientConn is ready. + for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. +func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + cc.mu.Unlock() + logger.Info("ClientConn asked to exit idle mode when not in idle mode") + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. 
+ // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. + cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper() + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + cc.balancerWrapper.exitIdleMode() + } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() + + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline which needs to be handled + // by cc.updateResolverState() which also grabs cc.mu. + if err := cc.initResolverWrapper(credsClone); err != nil { + return err } - // Build the resolver. 
- rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + if exitedIdle { + cc.addTraceEvent("exiting idle mode") } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. +func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() - cc.resolverWrapper = rWrapper + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + logger.Error("ClientConn asked to enter idle mode when not active") + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + conns := cc.conns + cc.conns = make(map[*addrConn]struct{}) + + // TODO: Currently, we close the resolver wrapper upon entering idle mode + // and create a new one upon exiting idle mode. This means that the + // `cc.resolverWrapper` field would be overwritten everytime we exit idle + // mode. While this means that we need to hold `cc.mu` when accessing + // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should + // try to do the same for the balancer and picker wrappers too. + cc.resolverWrapper.close() + cc.blockingpicker.enterIdleMode() + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. 
- if cc.dopts.block { - for { - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() + go func() { + cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() + return nil +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. 
+func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") + cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -415,7 +546,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -481,43 +612,67 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. 
It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idlenessManager + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. - mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. 
+ // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity. When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. +type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -536,14 +691,29 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // GetState returns the connectivity.State of ClientConn. // -// Experimental +// # Experimental // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. 
func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. + cc.balancerWrapper.exitIdleMode() +} + func (cc *ClientConn) scWatcher() { for { select { @@ -621,9 +791,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. cc.mu.Unlock() @@ -631,7 +799,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var ret error - if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { cc.maybeApplyDefaultServiceConfig(s.Addresses) // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? 
@@ -648,16 +819,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -665,24 +830,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -691,51 +844,42 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. 
-// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - cc.balancerWrapper.close() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + 
cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) +} + +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. @@ -745,32 +889,36 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. 
cc.mu.Lock() + defer cc.mu.Unlock() if cc.conns == nil { - cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} - cc.mu.Unlock() return ac, nil } @@ -800,7 +948,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { // Target returns the target string of the ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -827,67 +975,114 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - // Start a goroutine connecting to the server asynchronously. 
- go ac.resetTransport() + ac.resetTransport() return nil } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. +func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - defer ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + + addrs = copyAddressesWithoutBalancerAttributes(addrs) + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - ac.addrs = addrs - return true + // We were not connecting, so do nothing but update the addresses. 
+ ac.mu.Unlock() + return } - if ac.state == connectivity.Connecting { - return false + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } } - // ac.state is Ready, try to find the connected address. - var curAddrFound bool - for _, a := range addrs { - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. + + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => Close => onClose, which + // requires locking ac.mu. + if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil } - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() +} + +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. 
The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority } func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { @@ -928,15 +1123,11 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) - if err != nil { - return nil, nil, toRPCErr(err) - } - return t, done, nil } func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { @@ -961,35 +1152,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. 
- var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1011,7 +1193,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -1033,44 +1215,45 @@ func (cc *ClientConn) Close() error { cc.mu.Unlock() return ErrClientConnClosing } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil + idlenessMgr := cc.idlenessMgr cc.mu.Unlock() - cc.blockingpicker.close() - - if rWrapper != nil { - rWrapper.close() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. + if pWrapper != nil { + pWrapper.close() } if bWrapper != nil { bWrapper.close() } + if rWrapper != nil { + rWrapper.close() + } + if idlenessMgr != nil { + idlenessMgr.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) - } + cc.addTraceEvent("deleted") + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1095,12 +1278,13 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. 
- state connectivity.State + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. + channelzID *channelz.Identifier czData *channelzData } @@ -1109,8 +1293,15 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } + // When changing states, reset the state change channel. + close(ac.stateChan) + ac.stateChan = make(chan struct{}) ac.state = s - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + if lastErr == nil { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + } else { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + } ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1129,113 +1320,86 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } func (ac *addrConn) resetTransport() { - for i := 0; ; i++ { - if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOptions{}) - } + ac.mu.Lock() + acCtx := ac.ctx + if acCtx.Err() != nil { + ac.mu.Unlock() + return + } - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } - addrs := ac.addrs - backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) - // This will be the duration that dial gets to finish. - dialDuration := minConnectTimeout - if ac.dopts.minConnectTimeout != nil { - dialDuration = ac.dopts.minConnectTimeout() - } + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. 
+ dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() - if dialDuration < backoffFor { - // Give dial more time as we keep failing to connect. - dialDuration = backoffFor + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + if acCtx.Err() != nil { + return } - // We can potentially spend all the time trying the first address, and - // if the server accepts the connection and then hangs, the following - // addresses will never be tried. - // - // The spec doesn't mention what should be done for multiple addresses. - // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm - connectDeadline := time.Now().Add(dialDuration) + ac.mu.Lock() + ac.updateConnectivityState(connectivity.TransientFailure, err) - ac.updateConnectivityState(connectivity.Connecting, nil) - ac.transport = nil + // Backoff. + b := ac.resetBackoff ac.mu.Unlock() - newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline) - if err != nil { - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return - } - ac.updateConnectivityState(connectivity.TransientFailure, err) - - // Backoff. 
- b := ac.resetBackoff + ac.backoffIdx++ ac.mu.Unlock() - - timer := time.NewTimer(backoffFor) - select { - case <-timer.C: - ac.mu.Lock() - ac.backoffIdx++ - ac.mu.Unlock() - case <-b: - timer.Stop() - case <-ac.ctx.Done(): - timer.Stop() - return - } - continue + case <-b: + timer.Stop() + case <-acCtx.Done(): + timer.Stop() + return } ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - newTr.Close() - return + if acCtx.Err() == nil { + ac.updateConnectivityState(connectivity.Idle, err) } - ac.curAddr = addr - ac.transport = newTr - ac.backoffIdx = 0 - - hctx, hcancel := context.WithCancel(ac.ctx) - ac.startHealthCheck(hctx) ac.mu.Unlock() - - // Block until the created transport is down. And when this happens, - // we restart from the top of the addr list. - <-reconnect.Done() - hcancel() - // restart connecting - the top of the loop will set state to - // CONNECTING. This is against the current connectivity semantics doc, - // however it allows for graceful behavior for RPCs not yet dispatched - // - unfortunate timing would otherwise lead to the RPC failing even - // though the TRANSIENT_FAILURE state (called for by the doc) would be - // instantaneous. - // - // Ideally we should transition to Idle here and block until there is - // RPC activity that leads to the balancer requesting a reconnect of - // the associated SubConn. + return } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at the -// first successful one. It returns the transport, the address and a Event in -// the successful case. The Event fires when the returned transport disconnects. -func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { +// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// the first successful one. 
It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() - return nil, resolver.Address{}, nil, errConnClosing + if ctx.Err() != nil { + return errConnClosing } + ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1249,9 +1413,9 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { - return newTr, addr, reconnect, nil + return nil } if firstConnErr == nil { firstConnErr = err @@ -1260,86 +1424,90 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } // Couldn't connect to any address. - return nil, resolver.Address{}, nil, firstConnErr + return firstConnErr } -// createTransport creates a connection to addr. It returns the transport and a -// Event in the successful case. The Event fires when the returned transport -// disconnects. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) { - prefaceReceived := make(chan struct{}) - onCloseCalled := make(chan struct{}) - reconnect := grpcsync.NewEvent() - - // addr.ServerName takes precedent over ClientConn authority, if present. - if addr.ServerName == "" { - addr.ServerName = ac.cc.authority - } +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. 
+func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ctx) - once := sync.Once{} - onGoAway := func(r transport.GoAwayReason) { + onClose := func(r transport.GoAwayReason) { ac.mu.Lock() + defer ac.mu.Unlock() + // adjust params based on GoAwayReason ac.adjustParams(r) - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - reconnect.Fire() - } - - onClose := func() { - ac.mu.Lock() - once.Do(func() { - if ac.state == connectivity.Ready { - // Prevent this SubConn from being used for new RPCs by setting its - // state to Connecting. - // - // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting, nil) - } - }) - ac.mu.Unlock() - close(onCloseCalled) - reconnect.Fire() - } - - onPrefaceReceipt := func() { - close(prefaceReceived) + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } + ac.transport = nil + // Refresh the name resolver on any connection loss. + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. 
+ ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) - return nil, nil, err + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + return err } - select { - case <-time.After(time.Until(connectDeadline)): - // We didn't get the preface in time. - newTr.Close() - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) - return nil, nil, errors.New("timed out waiting for server handshake") - case <-prefaceReceived: - // We got the preface - huzzah! things are good. - case <-onCloseCalled: - // The transport has already closed - noop. - return nil, nil, errors.New("connection closed") - // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix. + ac.mu.Lock() + defer ac.mu.Unlock() + if ctx.Err() != nil { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. 
+ // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. + go newTr.Close(transport.ErrConnClosing) + return nil } - return newTr, reconnect, nil + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) + return nil + } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1409,7 +1577,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) } } }() @@ -1423,33 +1591,43 @@ func (ac *addrConn) resetConnectBackoff() { ac.mu.Unlock() } -// getReadyTransport returns the transport if ac's state is READY. -// Otherwise it returns nil, false. -// If ac's state is IDLE, it will trigger ac to connect. -func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { +// getReadyTransport returns the transport if ac's state is READY or nil if not. 
+func (ac *addrConn) getReadyTransport() transport.ClientTransport { ac.mu.Lock() - if ac.state == connectivity.Ready && ac.transport != nil { - t := ac.transport - ac.mu.Unlock() - return t, true - } - var idle bool - if ac.state == connectivity.Idle { - idle = true + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport } - ac.mu.Unlock() - // Trigger idle ac to connect. - if idle { - ac.connect() + return nil +} + +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } } - return nil, false + return nil, status.FromContextError(ctx.Err()).Err() } // tearDown starts to tear down the addrConn. -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many addrConn's in a -// tight loop. -// tearDown doesn't remove ac from ac.cc.conns. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. 
func (ac *addrConn) tearDown(err error) { ac.mu.Lock() if ac.state == connectivity.Shutdown { @@ -1473,19 +1651,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } @@ -1574,6 +1751,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { if scheme == rb.Scheme() { @@ -1594,3 +1774,206 @@ func (cc *ClientConn) connectionError() error { defer cc.lceMu.Unlock() return cc.lastConnectionError } + +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. 
+// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) parseTargetAndFindResolver() error { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. + defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb == nil { + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + } + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. 
+func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + + return resolver.Target{URL: *u}, nil +} + +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimeters + return false + } + // Everything else must be escaped. + return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. + for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. 
+// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. + // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + endpoint := cc.parsedTarget.Endpoint() + target := cc.target + switch { + case authorityFromDialOption != "": + cc.authority = authorityFromDialOption + case authorityFromCreds != "": + cc.authority = authorityFromCreds + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + cc.authority = "localhost" + case strings.HasPrefix(endpoint, ":"): + cc.authority = "localhost" + endpoint + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. 
For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + // Escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. + cc.authority = encodeAuthority(endpoint) + } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. +func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) + } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState. + // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. + cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil +} diff --git a/clientconn_authority_test.go b/clientconn_authority_test.go new file mode 100644 index 000000000000..3efb2ae8571e --- /dev/null +++ b/clientconn_authority_test.go @@ -0,0 +1,135 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "net" + "testing" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/testdata" +) + +func (s) TestClientConnAuthority(t *testing.T) { + serverNameOverride := "over.write.server.name" + creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), serverNameOverride) + if err != nil { + t.Fatalf("credentials.NewClientTLSFromFile(_, %q) failed: %v", err, serverNameOverride) + } + + tests := []struct { + name string + target string + opts []DialOption + wantAuthority string + }{ + { + name: "default", + target: "Non-Existent.Server:8080", + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials())}, + wantAuthority: "Non-Existent.Server:8080", + }, + { + name: "override-via-creds", + target: "Non-Existent.Server:8080", + opts: []DialOption{WithTransportCredentials(creds)}, + wantAuthority: serverNameOverride, + }, + { + name: "override-via-WithAuthority", + target: "Non-Existent.Server:8080", + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials()), WithAuthority("authority-override")}, + wantAuthority: "authority-override", + }, + { + name: "override-via-creds-and-WithAuthority", + target: "Non-Existent.Server:8080", + opts: []DialOption{WithTransportCredentials(creds), WithAuthority(serverNameOverride)}, + wantAuthority: serverNameOverride, + 
}, + { + name: "unix relative", + target: "unix:sock.sock", + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials())}, + wantAuthority: "localhost", + }, + { + name: "unix relative with custom dialer", + target: "unix:sock.sock", + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials()), WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "", addr) + })}, + wantAuthority: "localhost", + }, + { + name: "unix absolute", + target: "unix:/sock.sock", + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials())}, + wantAuthority: "localhost", + }, + { + name: "unix absolute with custom dialer", + target: "unix:///sock.sock", + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials()), WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, "", addr) + })}, + wantAuthority: "localhost", + }, + { + name: "localhost colon port", + target: "localhost:50051", + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials())}, + wantAuthority: "localhost:50051", + }, + { + name: "colon port", + target: ":50051", + opts: []DialOption{WithTransportCredentials(insecure.NewCredentials())}, + wantAuthority: "localhost:50051", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cc, err := Dial(test.target, test.opts...) 
+ if err != nil { + t.Fatalf("Dial(%q) failed: %v", test.target, err) + } + defer cc.Close() + if cc.authority != test.wantAuthority { + t.Fatalf("cc.authority = %q, want %q", cc.authority, test.wantAuthority) + } + }) + } +} + +func (s) TestClientConnAuthority_CredsAndDialOptionMismatch(t *testing.T) { + serverNameOverride := "over.write.server.name" + creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), serverNameOverride) + if err != nil { + t.Fatalf("credentials.NewClientTLSFromFile(_, %q) failed: %v", err, serverNameOverride) + } + opts := []DialOption{WithTransportCredentials(creds), WithAuthority("authority-override")} + if cc, err := Dial("Non-Existent.Server:8000", opts...); err == nil { + cc.Close() + t.Fatal("grpc.Dial() succeeded when expected to fail") + } +} diff --git a/clientconn_parsed_target_test.go b/clientconn_parsed_target_test.go new file mode 100644 index 000000000000..1ff46aaf08c7 --- /dev/null +++ b/clientconn_parsed_target_test.go @@ -0,0 +1,183 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "net" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils" + + "google.golang.org/grpc/resolver" +) + +func (s) TestParsedTarget_Success_WithoutCustomDialer(t *testing.T) { + defScheme := resolver.GetDefaultScheme() + tests := []struct { + target string + wantParsed resolver.Target + }{ + // No scheme is specified. + {target: "://a/b", wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://a/b"))}}, + {target: "a//b", wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a//b"))}}, + + // An unregistered scheme is specified. + {target: "a:///", wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:///"))}}, + {target: "a:b", wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "a:b"))}}, + + // A registered scheme is specified. + {target: "dns://a.server.com/google.com", wantParsed: resolver.Target{URL: *testutils.MustParseURL("dns://a.server.com/google.com")}}, + {target: "unix-abstract:/ a///://::!@#$%25^&*()b", wantParsed: resolver.Target{URL: *testutils.MustParseURL("unix-abstract:/ a///://::!@#$%25^&*()b")}}, + {target: "unix-abstract:passthrough:abc", wantParsed: resolver.Target{URL: *testutils.MustParseURL("unix-abstract:passthrough:abc")}}, + {target: "passthrough:///unix:///a/b/c", wantParsed: resolver.Target{URL: *testutils.MustParseURL("passthrough:///unix:///a/b/c")}}, + + // Cases for `scheme:absolute-path`. 
+ {target: "dns:/a/b/c", wantParsed: resolver.Target{URL: *testutils.MustParseURL("dns:/a/b/c")}}, + {target: "unregistered:/a/b/c", wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "unregistered:/a/b/c"))}}, + } + + for _, test := range tests { + t.Run(test.target, func(t *testing.T) { + cc, err := Dial(test.target, WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Dial(%q) failed: %v", test.target, err) + } + defer cc.Close() + + if !cmp.Equal(cc.parsedTarget, test.wantParsed) { + t.Errorf("cc.parsedTarget for dial target %q = %+v, want %+v", test.target, cc.parsedTarget, test.wantParsed) + } + }) + } +} + +func (s) TestParsedTarget_Failure_WithoutCustomDialer(t *testing.T) { + targets := []string{ + "", + "unix://a/b/c", + "unix://authority", + "unix-abstract://authority/a/b/c", + "unix-abstract://authority", + } + + for _, target := range targets { + t.Run(target, func(t *testing.T) { + if cc, err := Dial(target, WithTransportCredentials(insecure.NewCredentials())); err == nil { + defer cc.Close() + t.Fatalf("Dial(%q) succeeded cc.parsedTarget = %+v, expected to fail", target, cc.parsedTarget) + } + }) + } +} + +func (s) TestParsedTarget_WithCustomDialer(t *testing.T) { + defScheme := resolver.GetDefaultScheme() + tests := []struct { + target string + wantParsed resolver.Target + wantDialerAddress string + }{ + // unix:[local_path], unix:[/absolute], and unix://[/absolute] have + // different behaviors with a custom dialer. 
+ { + target: "unix:a/b/c", + wantParsed: resolver.Target{URL: *testutils.MustParseURL("unix:a/b/c")}, + wantDialerAddress: "unix:a/b/c", + }, + { + target: "unix:/a/b/c", + wantParsed: resolver.Target{URL: *testutils.MustParseURL("unix:/a/b/c")}, + wantDialerAddress: "unix:///a/b/c", + }, + { + target: "unix:///a/b/c", + wantParsed: resolver.Target{URL: *testutils.MustParseURL("unix:///a/b/c")}, + wantDialerAddress: "unix:///a/b/c", + }, + { + target: "dns:///127.0.0.1:50051", + wantParsed: resolver.Target{URL: *testutils.MustParseURL("dns:///127.0.0.1:50051")}, + wantDialerAddress: "127.0.0.1:50051", + }, + { + target: ":///127.0.0.1:50051", + wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ":///127.0.0.1:50051"))}, + wantDialerAddress: ":///127.0.0.1:50051", + }, + { + target: "dns://authority/127.0.0.1:50051", + wantParsed: resolver.Target{URL: *testutils.MustParseURL("dns://authority/127.0.0.1:50051")}, + wantDialerAddress: "127.0.0.1:50051", + }, + { + target: "://authority/127.0.0.1:50051", + wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "://authority/127.0.0.1:50051"))}, + wantDialerAddress: "://authority/127.0.0.1:50051", + }, + { + target: "/unix/socket/address", + wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, "/unix/socket/address"))}, + wantDialerAddress: "/unix/socket/address", + }, + { + target: "", + wantParsed: resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("%s:///%s", defScheme, ""))}, + wantDialerAddress: "", + }, + { + target: "passthrough://a.server.com/google.com", + wantParsed: resolver.Target{URL: *testutils.MustParseURL("passthrough://a.server.com/google.com")}, + wantDialerAddress: "google.com", + }, + } + + for _, test := range tests { + t.Run(test.target, func(t *testing.T) { + addrCh := make(chan string, 1) + dialer := func(ctx context.Context, address string) (net.Conn, error) { + addrCh <- 
address + return nil, errors.New("dialer error") + } + + cc, err := Dial(test.target, WithTransportCredentials(insecure.NewCredentials()), WithContextDialer(dialer)) + if err != nil { + t.Fatalf("Dial(%q) failed: %v", test.target, err) + } + defer cc.Close() + + select { + case addr := <-addrCh: + if addr != test.wantDialerAddress { + t.Fatalf("address in custom dialer is %q, want %q", addr, test.wantDialerAddress) + } + case <-time.After(time.Second): + t.Fatal("timeout when waiting for custom dialer to be invoked") + } + if !cmp.Equal(cc.parsedTarget, test.wantParsed) { + t.Errorf("cc.parsedTarget for dial target %q = %+v, want %+v", test.target, cc.parsedTarget, test.wantParsed) + } + }) + } +} diff --git a/clientconn_test.go b/clientconn_test.go index 6c61666b7efb..281c9618606f 100644 --- a/clientconn_test.go +++ b/clientconn_test.go @@ -25,22 +25,47 @@ import ( "math" "net" "strings" + "sync" "sync/atomic" "testing" "time" "golang.org/x/net/http2" "google.golang.org/grpc/backoff" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/testdata" ) +const ( + defaultTestTimeout = 10 * time.Second + stateRecordingBalancerName = "state_recording_balancer" +) + +var testBalancerBuilder = newStateRecordingBalancerBuilder() + +func init() { + balancer.Register(testBalancerBuilder) +} + +func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { + scpr := r.CC.ParseServiceConfig(s) + if scpr.Err != nil { + panic(fmt.Sprintf("Error parsing config %q: %v", s, scpr.Err)) + } + return scpr +} + 
func (s) TestDialWithTimeout(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -68,7 +93,7 @@ func (s) TestDialWithTimeout(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{lisAddr}}) - client, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithTimeout(5*time.Second)) + client, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithTimeout(5*time.Second)) close(dialDone) if err != nil { t.Fatalf("Dial failed. Err: %v", err) @@ -120,7 +145,7 @@ func (s) TestDialWithMultipleBackendsNotSendingServerPreface(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{lis1Addr, lis2Addr}}) - client, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + client, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) if err != nil { t.Fatalf("Dial failed. Err: %v", err) } @@ -170,7 +195,7 @@ func (s) TestDialWaitsForServerSettings(t *testing.T) { }() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - client, err := DialContext(ctx, lis.Addr().String(), WithInsecure(), WithBlock()) + client, err := DialContext(ctx, lis.Addr().String(), WithTransportCredentials(insecure.NewCredentials()), WithBlock()) close(dialDone) if err != nil { t.Fatalf("Error while dialing. 
Err: %v", err) @@ -208,16 +233,18 @@ func (s) TestDialWaitsForServerSettingsAndFails(t *testing.T) { defer cancel() client, err := DialContext(ctx, lis.Addr().String(), - WithInsecure(), + WithTransportCredentials(insecure.NewCredentials()), WithReturnConnectionError(), - withBackoff(noBackoff{}), - withMinConnectDeadline(func() time.Duration { return time.Second / 4 })) + WithConnectParams(ConnectParams{ + Backoff: backoff.Config{}, + MinConnectTimeout: 250 * time.Millisecond, + })) lis.Close() if err == nil { client.Close() t.Fatalf("Unexpected success (err=nil) while dialing") } - expectedMsg := "server handshake" + expectedMsg := "server preface" if !strings.Contains(err.Error(), context.DeadlineExceeded.Error()) || !strings.Contains(err.Error(), expectedMsg) { t.Fatalf("DialContext(_) = %v; want a message that includes both %q and %q", err, context.DeadlineExceeded.Error(), expectedMsg) } @@ -285,10 +312,13 @@ func (s) TestCloseConnectionWhenServerPrefaceNotReceived(t *testing.T) { break } }() - client, err := Dial(lis.Addr().String(), WithInsecure(), withMinConnectDeadline(func() time.Duration { return time.Millisecond * 500 })) + client, err := Dial(lis.Addr().String(), WithTransportCredentials(insecure.NewCredentials()), withMinConnectDeadline(func() time.Duration { return time.Millisecond * 500 })) if err != nil { t.Fatalf("Error while dialing. Err: %v", err) } + + go stayConnected(client) + // wait for connection to be accepted on the server. timer := time.NewTimer(time.Second * 10) select { @@ -306,14 +336,12 @@ func (s) TestCloseConnectionWhenServerPrefaceNotReceived(t *testing.T) { func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { - t.Fatalf("Error while listening. Err: %v", err) + t.Fatalf("Unexpected error from net.Listen(%q, %q): %v", "tcp", "localhost:0", err) } defer lis.Close() done := make(chan struct{}) go func() { // Launch the server. 
- defer func() { - close(done) - }() + defer close(done) conn, err := lis.Accept() // Accept the connection only to close it immediately. if err != nil { t.Errorf("Error while accepting. Err: %v", err) @@ -340,17 +368,30 @@ func (s) TestBackoffWhenNoServerPrefaceReceived(t *testing.T) { prevAt = meow } }() - client, err := Dial(lis.Addr().String(), WithInsecure()) + bc := backoff.Config{ + BaseDelay: 200 * time.Millisecond, + Multiplier: 2.0, + Jitter: 0, + MaxDelay: 120 * time.Second, + } + cp := ConnectParams{ + Backoff: bc, + MinConnectTimeout: 1 * time.Second, + } + cc, err := Dial(lis.Addr().String(), WithTransportCredentials(insecure.NewCredentials()), WithConnectParams(cp)) if err != nil { - t.Fatalf("Error while dialing. Err: %v", err) + t.Fatalf("Unexpected error from Dial(%v) = %v", lis.Addr(), err) } - defer client.Close() + defer cc.Close() + go stayConnected(cc) <-done - } func (s) TestWithTimeout(t *testing.T) { - conn, err := Dial("passthrough:///Non-Existent.Server:80", WithTimeout(time.Millisecond), WithBlock(), WithInsecure()) + conn, err := Dial("passthrough:///Non-Existent.Server:80", + WithTimeout(time.Millisecond), + WithBlock(), + WithTransportCredentials(insecure.NewCredentials())) if err == nil { conn.Close() } @@ -375,62 +416,6 @@ func (s) TestWithTransportCredentialsTLS(t *testing.T) { } } -func (s) TestDefaultAuthority(t *testing.T) { - target := "Non-Existent.Server:8080" - conn, err := Dial(target, WithInsecure()) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v, want _, ", err) - } - defer conn.Close() - if conn.authority != target { - t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, target) - } -} - -func (s) TestTLSServerNameOverwrite(t *testing.T) { - overwriteServerName := "over.write.server.name" - creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), overwriteServerName) - if err != nil { - t.Fatalf("Failed to create credentials %v", err) - } - conn, err := 
Dial("passthrough:///Non-Existent.Server:80", WithTransportCredentials(creds)) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v, want _, ", err) - } - defer conn.Close() - if conn.authority != overwriteServerName { - t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) - } -} - -func (s) TestWithAuthority(t *testing.T) { - overwriteServerName := "over.write.server.name" - conn, err := Dial("passthrough:///Non-Existent.Server:80", WithInsecure(), WithAuthority(overwriteServerName)) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v, want _, ", err) - } - defer conn.Close() - if conn.authority != overwriteServerName { - t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) - } -} - -func (s) TestWithAuthorityAndTLS(t *testing.T) { - overwriteServerName := "over.write.server.name" - creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), overwriteServerName) - if err != nil { - t.Fatalf("Failed to create credentials %v", err) - } - conn, err := Dial("passthrough:///Non-Existent.Server:80", WithTransportCredentials(creds), WithAuthority("no.effect.authority")) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v, want _, ", err) - } - defer conn.Close() - if conn.authority != overwriteServerName { - t.Fatalf("%v.authority = %v, want %v", conn, conn.authority, overwriteServerName) - } -} - // When creating a transport configured with n addresses, only calculate the // backoff once per "round" of attempts instead of once per address (n times // per "round" of attempts). 
@@ -493,8 +478,7 @@ func (s) TestDial_OneBackoffPerRetryGroup(t *testing.T) { {Addr: lis2.Addr().String()}, }}) client, err := DialContext(ctx, "whatever:///this-gets-overwritten", - WithInsecure(), - WithBalancerName(stateRecordingBalancerName), + WithTransportCredentials(insecure.NewCredentials()), WithResolvers(rb), withMinConnectDeadline(getMinConnectTimeout)) if err != nil { @@ -520,7 +504,7 @@ func (s) TestDial_OneBackoffPerRetryGroup(t *testing.T) { func (s) TestDialContextCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - if _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure()); err != context.Canceled { + if _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithTransportCredentials(insecure.NewCredentials())); err != context.Canceled { t.Fatalf("DialContext(%v, _) = _, %v, want _, %v", ctx, err, context.Canceled) } } @@ -538,36 +522,59 @@ func (s) TestDialContextFailFast(t *testing.T) { return nil, failErr } - _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithInsecure(), WithDialer(dialer), FailOnNonTempDialError(true)) + _, err := DialContext(ctx, "Non-Existent.Server:80", WithBlock(), WithTransportCredentials(insecure.NewCredentials()), WithDialer(dialer), FailOnNonTempDialError(true)) if terr, ok := err.(transport.ConnectionError); !ok || terr.Origin() != failErr { t.Fatalf("DialContext() = _, %v, want _, %v", err, failErr) } } // securePerRPCCredentials always requires transport security. 
-type securePerRPCCredentials struct{} - -func (c securePerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - return nil, nil +type securePerRPCCredentials struct { + credentials.PerRPCCredentials } func (c securePerRPCCredentials) RequireTransportSecurity() bool { return true } +type fakeBundleCreds struct { + credentials.Bundle + transportCreds credentials.TransportCredentials +} + +func (b *fakeBundleCreds) TransportCredentials() credentials.TransportCredentials { + return b.transportCreds +} + func (s) TestCredentialsMisuse(t *testing.T) { - tlsCreds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com") + // Use of no transport creds and no creds bundle must fail. + if _, err := Dial("passthrough:///Non-Existent.Server:80"); err != errNoTransportSecurity { + t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errNoTransportSecurity) + } + + // Use of both transport creds and creds bundle must fail. 
+ creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com") if err != nil { t.Fatalf("Failed to create authenticator %v", err) } - // Two conflicting credential configurations - if _, err := Dial("passthrough:///Non-Existent.Server:80", WithTransportCredentials(tlsCreds), WithBlock(), WithInsecure()); err != errCredentialsConflict { - t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errCredentialsConflict) + dopts := []DialOption{ + WithTransportCredentials(creds), + WithCredentialsBundle(&fakeBundleCreds{transportCreds: creds}), + } + if _, err := Dial("passthrough:///Non-Existent.Server:80", dopts...); err != errTransportCredsAndBundle { + t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errTransportCredsAndBundle) } - // security info on insecure connection - if _, err := Dial("passthrough:///Non-Existent.Server:80", WithPerRPCCredentials(securePerRPCCredentials{}), WithBlock(), WithInsecure()); err != errTransportCredentialsMissing { + + // Use of perRPC creds requiring transport security over an insecure + // transport must fail. + if _, err := Dial("passthrough:///Non-Existent.Server:80", WithPerRPCCredentials(securePerRPCCredentials{}), WithTransportCredentials(insecure.NewCredentials())); err != errTransportCredentialsMissing { t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errTransportCredentialsMissing) } + + // Use of a creds bundle with nil transport credentials must fail. 
+ if _, err := Dial("passthrough:///Non-Existent.Server:80", WithCredentialsBundle(&fakeBundleCreds{})); err != errNoTransportCredsInBundle {
+ t.Fatalf("Dial(_, _) = _, %v, want _, %v", err, errNoTransportCredsInBundle)
+ }
 }
 
 func (s) TestWithBackoffConfigDefault(t *testing.T) {
@@ -604,7 +611,7 @@ func (s) TestWithConnectParams(t *testing.T) {
 }
 
 func testBackoffConfigSet(t *testing.T, wantBackoff internalbackoff.Exponential, opts ...DialOption) {
- opts = append(opts, WithInsecure())
+ opts = append(opts, WithTransportCredentials(insecure.NewCredentials()))
 conn, err := Dial("passthrough:///foo:80", opts...)
 if err != nil {
 t.Fatalf("unexpected error dialing connection: %v", err)
 }
@@ -628,7 +635,7 @@ func testBackoffConfigSet(t *testing.T, wantBackoff internalbackoff.Exponential,
 func (s) TestConnectParamsWithMinConnectTimeout(t *testing.T) {
 // Default value specified for minConnectTimeout in the spec is 20 seconds.
 mct := 1 * time.Minute
- conn, err := Dial("passthrough:///foo:80", WithInsecure(), WithConnectParams(ConnectParams{MinConnectTimeout: mct}))
+ conn, err := Dial("passthrough:///foo:80", WithTransportCredentials(insecure.NewCredentials()), WithConnectParams(ConnectParams{MinConnectTimeout: mct}))
 if err != nil {
 t.Fatalf("unexpected error dialing connection: %v", err)
 }
@@ -642,7 +649,7 @@ func (s) TestConnectParamsWithMinConnectTimeout(t *testing.T) {
 func (s) TestResolverServiceConfigBeforeAddressNotPanic(t *testing.T) {
 r := manual.NewBuilderWithScheme("whatever")
- cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r))
+ cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r))
 if err != nil {
 t.Fatalf("failed to dial: %v", err)
 }
@@ -659,7 +666,7 @@ func (s) TestResolverServiceConfigWhileClosingNotPanic(t *testing.T) {
 for i := 0; i < 10; i++ { // Run this multiple times to make sure it doesn't panic.
r := manual.NewBuilderWithScheme(fmt.Sprintf("whatever-%d", i)) - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -672,7 +679,7 @@ func (s) TestResolverServiceConfigWhileClosingNotPanic(t *testing.T) { func (s) TestResolverEmptyUpdateNotPanic(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) + cc, err := Dial(r.Scheme()+":///test.server", WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r)) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -685,12 +692,15 @@ func (s) TestResolverEmptyUpdateNotPanic(t *testing.T) { } func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { + grpctest.TLogger.ExpectError("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\"") + lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to listen. 
Err: %v", err) } defer lis.Close() - connected := make(chan struct{}) + connected := grpcsync.NewEvent() + defer connected.Fire() go func() { conn, err := lis.Accept() if err != nil { @@ -712,7 +722,7 @@ func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { t.Errorf("error writing settings: %v", err) return } - <-connected + <-connected.Done() if err := f.WriteGoAway(0, http2.ErrCodeEnhanceYourCalm, []byte("too_many_pings")); err != nil { t.Errorf("error writing GOAWAY: %v", err) return @@ -721,7 +731,7 @@ func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { addr := lis.Addr().String() ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - cc, err := DialContext(ctx, addr, WithBlock(), WithInsecure(), WithKeepaliveParams(keepalive.ClientParameters{ + cc, err := DialContext(ctx, addr, WithBlock(), WithTransportCredentials(insecure.NewCredentials()), WithKeepaliveParams(keepalive.ClientParameters{ Time: 10 * time.Second, Timeout: 100 * time.Millisecond, PermitWithoutStream: true, @@ -730,28 +740,27 @@ func (s) TestClientUpdatesParamsAfterGoAway(t *testing.T) { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } defer cc.Close() - close(connected) + connected.Fire() for { time.Sleep(10 * time.Millisecond) cc.mu.RLock() v := cc.mkp.Time + cc.mu.RUnlock() if v == 20*time.Second { // Success - cc.mu.RUnlock() return } if ctx.Err() != nil { // Timeout t.Fatalf("cc.dopts.copts.Keepalive.Time = %v , want 20s", v) } - cc.mu.RUnlock() } } func (s) TestDisableServiceConfigOption(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") addr := r.Scheme() + ":///non.existent" - cc, err := Dial(addr, WithInsecure(), WithResolvers(r), WithDisableServiceConfig()) + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithDisableServiceConfig()) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -778,7 +787,7 @@ func (s) TestDisableServiceConfigOption(t *testing.T) { 
func (s) TestMethodConfigDefaultService(t *testing.T) { addr := "nonexist:///non.existent" - cc, err := Dial(addr, WithInsecure(), WithDefaultServiceConfig(`{ + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials()), WithDefaultServiceConfig(`{ "methodConfig": [{ "name": [ { @@ -801,7 +810,7 @@ func (s) TestMethodConfigDefaultService(t *testing.T) { func (s) TestGetClientConnTarget(t *testing.T) { addr := "nonexist:///non.existent" - cc, err := Dial(addr, WithInsecure()) + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -827,11 +836,12 @@ func (s) TestResetConnectBackoff(t *testing.T) { dials <- struct{}{} return nil, errors.New("failed to fake dial") } - cc, err := Dial("any", WithInsecure(), WithDialer(dialer), withBackoff(backoffForever{})) + cc, err := Dial("any", WithTransportCredentials(insecure.NewCredentials()), WithDialer(dialer), withBackoff(backoffForever{})) if err != nil { t.Fatalf("Dial() = _, %v; want _, nil", err) } defer cc.Close() + go stayConnected(cc) select { case <-dials: case <-time.NewTimer(10 * time.Second).C: @@ -855,21 +865,26 @@ func (s) TestResetConnectBackoff(t *testing.T) { func (s) TestBackoffCancel(t *testing.T) { dialStrCh := make(chan string) - cc, err := Dial("any", WithInsecure(), WithDialer(func(t string, _ time.Duration) (net.Conn, error) { + cc, err := Dial("any", WithTransportCredentials(insecure.NewCredentials()), WithDialer(func(t string, _ time.Duration) (net.Conn, error) { dialStrCh <- t return nil, fmt.Errorf("test dialer, always error") })) if err != nil { t.Fatalf("Failed to create ClientConn: %v", err) } - <-dialStrCh - cc.Close() - // Should not leak. May need -count 5000 to exercise. 
+ defer cc.Close() + + select { + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout when waiting for custom dialer to be invoked during Dial") + case <-dialStrCh: + } } -// UpdateAddresses should cause the next reconnect to begin from the top of the -// list if the connection is not READY. -func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { +// TestUpdateAddresses_NoopIfCalledWithSameAddresses tests that UpdateAddresses +// should be noop if UpdateAddresses is called with the same list of addresses, +// even when the SubConn is in Connecting and doesn't have a current address. +func (s) TestUpdateAddresses_NoopIfCalledWithSameAddresses(t *testing.T) { lis1, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. Err: %v", err) @@ -889,12 +904,15 @@ func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { defer lis3.Close() closeServer2 := make(chan struct{}) + exitCh := make(chan struct{}) server1ContactedFirstTime := make(chan struct{}) server1ContactedSecondTime := make(chan struct{}) server2ContactedFirstTime := make(chan struct{}) server2ContactedSecondTime := make(chan struct{}) server3Contacted := make(chan struct{}) + defer close(exitCh) + // Launch server 1. go func() { // First, let's allow the initial connection to go READY. We need to do @@ -918,12 +936,18 @@ func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { // until balancer is built to process the addresses. stateNotifications := testBalancerBuilder.nextStateNotifier() // Wait for the transport to become ready. - for s := range stateNotifications { - if s == connectivity.Ready { - break + for { + select { + case st := <-stateNotifications: + if st == connectivity.Ready { + goto ready + } + case <-exitCh: + return } } + ready: // Once it's ready, curAddress has been set. So let's close this // connection prompting the first reconnect cycle. 
conn1.Close() @@ -977,15 +1001,18 @@ func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { rb.InitialState(resolver.State{Addresses: addrsList}) client, err := Dial("whatever:///this-gets-overwritten", - WithInsecure(), + WithTransportCredentials(insecure.NewCredentials()), WithResolvers(rb), - withBackoff(noBackoff{}), - WithBalancerName(stateRecordingBalancerName), - withMinConnectDeadline(func() time.Duration { return time.Hour })) + WithConnectParams(ConnectParams{ + Backoff: backoff.Config{}, + MinConnectTimeout: time.Hour, + }), + WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName))) if err != nil { t.Fatal(err) } defer client.Close() + go stayConnected(client) timeout := time.After(5 * time.Second) @@ -1011,19 +1038,20 @@ func (s) TestUpdateAddresses_RetryFromFirstAddr(t *testing.T) { } client.mu.Unlock() + // Call UpdateAddresses with the same list of addresses, it should be a noop + // (even when the SubConn is Connecting, and doesn't have a curAddr). ac.acbw.UpdateAddresses(addrsList) // We've called tryUpdateAddrs - now let's make server2 close the - // connection and check that it goes back to server1 instead of continuing - // to server3 or trying server2 again. + // connection and check that it continues to server3. 
close(closeServer2) select { case <-server1ContactedSecondTime: + t.Fatal("server1 was contacted a second time, but it should have continued to server 3") case <-server2ContactedSecondTime: - t.Fatal("server2 was contacted a second time, but it after tryUpdateAddrs it should have re-started the list and tried server1") + t.Fatal("server2 was contacted a second time, but it should have continued to server 3") case <-server3Contacted: - t.Fatal("server3 was contacted, but after tryUpdateAddrs it should have re-started the list and tried server1") case <-timeout: t.Fatal("timed out waiting for any server to be contacted after tryUpdateAddrs") } @@ -1064,14 +1092,14 @@ func verifyWaitForReadyEqualsTrue(cc *ClientConn) bool { } func testInvalidDefaultServiceConfig(t *testing.T) { - _, err := Dial("fake.com", WithInsecure(), WithDefaultServiceConfig("")) + _, err := Dial("fake.com", WithTransportCredentials(insecure.NewCredentials()), WithDefaultServiceConfig("")) if !strings.Contains(err.Error(), invalidDefaultServiceConfigErrPrefix) { t.Fatalf("Dial got err: %v, want err contains: %v", err, invalidDefaultServiceConfigErrPrefix) } } func testDefaultServiceConfigWhenResolverServiceConfigDisabled(t *testing.T, r *manual.Resolver, addr string, js string) { - cc, err := Dial(addr, WithInsecure(), WithDisableServiceConfig(), WithResolvers(r), WithDefaultServiceConfig(js)) + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials()), WithDisableServiceConfig(), WithResolvers(r), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -1087,7 +1115,7 @@ func testDefaultServiceConfigWhenResolverServiceConfigDisabled(t *testing.T, r * } func testDefaultServiceConfigWhenResolverDoesNotReturnServiceConfig(t *testing.T, r *manual.Resolver, addr string, js string) { - cc, err := Dial(addr, WithInsecure(), WithResolvers(r), WithDefaultServiceConfig(js)) + cc, err := Dial(addr, 
WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -1101,7 +1129,7 @@ func testDefaultServiceConfigWhenResolverDoesNotReturnServiceConfig(t *testing.T } func testDefaultServiceConfigWhenResolverReturnInvalidServiceConfig(t *testing.T, r *manual.Resolver, addr string, js string) { - cc, err := Dial(addr, WithInsecure(), WithResolvers(r), WithDefaultServiceConfig(js)) + cc, err := Dial(addr, WithTransportCredentials(insecure.NewCredentials()), WithResolvers(r), WithDefaultServiceConfig(js)) if err != nil { t.Fatalf("Dial(%s, _) = _, %v, want _, ", addr, err) } @@ -1113,3 +1141,120 @@ func testDefaultServiceConfigWhenResolverReturnInvalidServiceConfig(t *testing.T t.Fatal("default service config failed to be applied after 1s") } } + +type stateRecordingBalancer struct { + notifier chan<- connectivity.State + balancer.Balancer +} + +func (b *stateRecordingBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + b.notifier <- s.ConnectivityState + b.Balancer.UpdateSubConnState(sc, s) +} + +func (b *stateRecordingBalancer) ResetNotifier(r chan<- connectivity.State) { + b.notifier = r +} + +func (b *stateRecordingBalancer) Close() { + b.Balancer.Close() +} + +type stateRecordingBalancerBuilder struct { + mu sync.Mutex + notifier chan connectivity.State // The notifier used in the last Balancer. 
+} + +func newStateRecordingBalancerBuilder() *stateRecordingBalancerBuilder { + return &stateRecordingBalancerBuilder{} +} + +func (b *stateRecordingBalancerBuilder) Name() string { + return stateRecordingBalancerName +} + +func (b *stateRecordingBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + stateNotifications := make(chan connectivity.State, 10) + b.mu.Lock() + b.notifier = stateNotifications + b.mu.Unlock() + return &stateRecordingBalancer{ + notifier: stateNotifications, + Balancer: balancer.Get("pick_first").Build(cc, opts), + } +} + +func (b *stateRecordingBalancerBuilder) nextStateNotifier() <-chan connectivity.State { + b.mu.Lock() + defer b.mu.Unlock() + ret := b.notifier + b.notifier = nil + return ret +} + +// Keep reading until something causes the connection to die (EOF, server +// closed, etc). Useful as a tool for mindlessly keeping the connection +// healthy, since the client will error if things like client prefaces are not +// accepted in a timely fashion. +func keepReading(conn net.Conn) { + buf := make([]byte, 1024) + for _, err := conn.Read(buf); err == nil; _, err = conn.Read(buf) { + } +} + +// stayConnected makes cc stay connected by repeatedly calling cc.Connect() +// until the state becomes Shutdown or until 10 seconds elapses. 
+func stayConnected(cc *ClientConn) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + for { + state := cc.GetState() + switch state { + case connectivity.Idle: + cc.Connect() + case connectivity.Shutdown: + return + } + if !cc.WaitForStateChange(ctx, state) { + return + } + } +} + +func (s) TestURLAuthorityEscape(t *testing.T) { + tests := []struct { + name string + authority string + want string + }{ + { + name: "ipv6_authority", + authority: "[::1]", + want: "[::1]", + }, + { + name: "with_user_and_host", + authority: "userinfo@host:10001", + want: "userinfo@host:10001", + }, + { + name: "with_multiple_slashes", + authority: "projects/123/network/abc/service", + want: "projects%2F123%2Fnetwork%2Fabc%2Fservice", + }, + { + name: "all_possible_allowed_chars", + authority: "abc123-._~!$&'()*+,;=@:[]", + want: "abc123-._~!$&'()*+,;=@:[]", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if got, want := encodeAuthority(test.authority), test.want; got != want { + t.Errorf("encodeAuthority(%s) = %s, want %s", test.authority, got, test.want) + } + }) + } +} diff --git a/cmd/protoc-gen-go-grpc/README.md b/cmd/protoc-gen-go-grpc/README.md index 4758125de0d2..a2d4d010212a 100644 --- a/cmd/protoc-gen-go-grpc/README.md +++ b/cmd/protoc-gen-go-grpc/README.md @@ -14,7 +14,7 @@ To restore this behavior, set the option `require_unimplemented_servers=false`. E.g.: ``` - protoc --go-grpc_out=require_unimplemented_servers=false[,other options...]:. \ + protoc --go-grpc_out=. --go-grpc_opt=require_unimplemented_servers=false[,other options...] 
\ ``` Note that this is not recommended, and the option is only provided to restore diff --git a/cmd/protoc-gen-go-grpc/go.mod b/cmd/protoc-gen-go-grpc/go.mod index d0cfd8ebf56f..d33b4f1d8c6f 100644 --- a/cmd/protoc-gen-go-grpc/go.mod +++ b/cmd/protoc-gen-go-grpc/go.mod @@ -1,5 +1,5 @@ module google.golang.org/grpc/cmd/protoc-gen-go-grpc -go 1.9 +go 1.17 -require google.golang.org/protobuf v1.23.0 +require google.golang.org/protobuf v1.30.0 diff --git a/cmd/protoc-gen-go-grpc/go.sum b/cmd/protoc-gen-go-grpc/go.sum index 92baf2631b73..1838366909dd 100644 --- a/cmd/protoc-gen-go-grpc/go.sum +++ b/cmd/protoc-gen-go-grpc/go.sum @@ -1,18 +1,8 @@ -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/cmd/protoc-gen-go-grpc/grpc.go b/cmd/protoc-gen-go-grpc/grpc.go index 1e787344ebcc..9e15d2d8daf3 100644 --- a/cmd/protoc-gen-go-grpc/grpc.go +++ b/cmd/protoc-gen-go-grpc/grpc.go @@ -24,7 +24,7 @@ import ( "strings" "google.golang.org/protobuf/compiler/protogen" - + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" ) @@ -35,6 +35,94 @@ const ( statusPackage = protogen.GoImportPath("google.golang.org/grpc/status") ) +type serviceGenerateHelperInterface interface { + formatFullMethodSymbol(service *protogen.Service, method *protogen.Method) string + genFullMethods(g *protogen.GeneratedFile, service *protogen.Service) + generateClientStruct(g *protogen.GeneratedFile, clientName string) + generateNewClientDefinitions(g 
*protogen.GeneratedFile, service *protogen.Service, clientName string) + generateUnimplementedServerType(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) + generateServerFunctions(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service, serverType string, serviceDescVar string) + formatHandlerFuncName(service *protogen.Service, hname string) string +} + +type serviceGenerateHelper struct{} + +func (serviceGenerateHelper) formatFullMethodSymbol(service *protogen.Service, method *protogen.Method) string { + return fmt.Sprintf("%s_%s_FullMethodName", service.GoName, method.GoName) +} + +func (serviceGenerateHelper) genFullMethods(g *protogen.GeneratedFile, service *protogen.Service) { + g.P("const (") + for _, method := range service.Methods { + fmSymbol := helper.formatFullMethodSymbol(service, method) + fmName := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) + g.P(fmSymbol, ` = "`, fmName, `"`) + } + g.P(")") + g.P() +} + +func (serviceGenerateHelper) generateClientStruct(g *protogen.GeneratedFile, clientName string) { + g.P("type ", unexport(clientName), " struct {") + g.P("cc ", grpcPackage.Ident("ClientConnInterface")) + g.P("}") + g.P() +} + +func (serviceGenerateHelper) generateNewClientDefinitions(g *protogen.GeneratedFile, service *protogen.Service, clientName string) { + g.P("return &", unexport(clientName), "{cc}") +} + +func (serviceGenerateHelper) generateUnimplementedServerType(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { + serverType := service.GoName + "Server" + mustOrShould := "must" + if !*requireUnimplemented { + mustOrShould = "should" + } + // Server Unimplemented struct for forward compatibility. 
+ g.P("// Unimplemented", serverType, " ", mustOrShould, " be embedded to have forward compatible implementations.") + g.P("type Unimplemented", serverType, " struct {") + g.P("}") + g.P() + for _, method := range service.Methods { + nilArg := "" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + nilArg = "nil," + } + g.P("func (Unimplemented", serverType, ") ", serverSignature(g, method), "{") + g.P("return ", nilArg, statusPackage.Ident("Errorf"), "(", codesPackage.Ident("Unimplemented"), `, "method `, method.GoName, ` not implemented")`) + g.P("}") + } + if *requireUnimplemented { + g.P("func (Unimplemented", serverType, ") mustEmbedUnimplemented", serverType, "() {}") + } + g.P() +} + +func (serviceGenerateHelper) generateServerFunctions(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service, serverType string, serviceDescVar string) { + // Server handler implementations. + handlerNames := make([]string, 0, len(service.Methods)) + for _, method := range service.Methods { + hname := genServerMethod(gen, file, g, method, func(hname string) string { + return hname + }) + handlerNames = append(handlerNames, hname) + } + genServiceDesc(file, g, serviceDescVar, serverType, service, handlerNames) +} + +func (serviceGenerateHelper) formatHandlerFuncName(service *protogen.Service, hname string) string { + return hname +} + +var helper serviceGenerateHelperInterface = serviceGenerateHelper{} + +// FileDescriptorProto.package field number +const fileDescriptorProtoPackageFieldNumber = 2 + +// FileDescriptorProto.syntax field number +const fileDescriptorProtoSyntaxFieldNumber = 12 + // generateFile generates a _grpc.pb.go file containing gRPC service definitions. 
func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { if len(file.Services) == 0 { @@ -42,14 +130,38 @@ func generateFile(gen *protogen.Plugin, file *protogen.File) *protogen.Generated } filename := file.GeneratedFilenamePrefix + "_grpc.pb.go" g := gen.NewGeneratedFile(filename, file.GoImportPath) + // Attach all comments associated with the syntax field. + genLeadingComments(g, file.Desc.SourceLocations().ByPath(protoreflect.SourcePath{fileDescriptorProtoSyntaxFieldNumber})) g.P("// Code generated by protoc-gen-go-grpc. DO NOT EDIT.") + g.P("// versions:") + g.P("// - protoc-gen-go-grpc v", version) + g.P("// - protoc ", protocVersion(gen)) + if file.Proto.GetOptions().GetDeprecated() { + g.P("// ", file.Desc.Path(), " is a deprecated file.") + } else { + g.P("// source: ", file.Desc.Path()) + } g.P() + // Attach all comments associated with the package field. + genLeadingComments(g, file.Desc.SourceLocations().ByPath(protoreflect.SourcePath{fileDescriptorProtoPackageFieldNumber})) g.P("package ", file.GoPackageName) g.P() generateFileContent(gen, file, g) return g } +func protocVersion(gen *protogen.Plugin) string { + v := gen.Request.GetCompilerVersion() + if v == nil { + return "(unknown)" + } + var suffix string + if s := v.GetSuffix(); s != "" { + suffix = "-" + s + } + return fmt.Sprintf("v%d.%d.%d%s", v.GetMajor(), v.GetMinor(), v.GetPatch(), suffix) +} + // generateFileContent generates the gRPC service definitions, excluding the package statement. func generateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile) { if len(file.Services) == 0 { @@ -67,13 +179,16 @@ func generateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen. } func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { + // Full methods constants. + helper.genFullMethods(g, service) + + // Client interface. 
clientName := service.GoName + "Client" g.P("// ", clientName, " is the client API for ", service.GoName, " service.") g.P("//") g.P("// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.") - // Client interface. if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { g.P("//") g.P(deprecationComment) @@ -92,17 +207,14 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated g.P() // Client structure. - g.P("type ", unexport(clientName), " struct {") - g.P("cc ", grpcPackage.Ident("ClientConnInterface")) - g.P("}") - g.P() + helper.generateClientStruct(g, clientName) // NewClient factory. if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { g.P(deprecationComment) } g.P("func New", clientName, " (cc ", grpcPackage.Ident("ClientConnInterface"), ") ", clientName, " {") - g.P("return &", unexport(clientName), "{cc}") + helper.generateNewClientDefinitions(g, service, clientName) g.P("}") g.P() @@ -151,23 +263,7 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated g.P() // Server Unimplemented struct for forward compatibility. 
- g.P("// Unimplemented", serverType, " ", mustOrShould, " be embedded to have forward compatible implementations.") - g.P("type Unimplemented", serverType, " struct {") - g.P("}") - g.P() - for _, method := range service.Methods { - nilArg := "" - if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { - nilArg = "nil," - } - g.P("func (Unimplemented", serverType, ") ", serverSignature(g, method), "{") - g.P("return ", nilArg, statusPackage.Ident("Errorf"), "(", codesPackage.Ident("Unimplemented"), `, "method `, method.GoName, ` not implemented")`) - g.P("}") - } - if *requireUnimplemented { - g.P("func (Unimplemented", serverType, ") mustEmbedUnimplemented", serverType, "() {}") - } - g.P() + helper.generateUnimplementedServerType(gen, file, g, service) // Unsafe Server interface to opt-out of forward compatibility. g.P("// Unsafe", serverType, " may be embedded to opt out of forward compatibility for this service.") @@ -187,51 +283,7 @@ func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.Generated g.P("}") g.P() - // Server handler implementations. - var handlerNames []string - for _, method := range service.Methods { - hname := genServerMethod(gen, file, g, method) - handlerNames = append(handlerNames, hname) - } - - // Service descriptor. 
- g.P("// ", serviceDescVar, " is the ", grpcPackage.Ident("ServiceDesc"), " for ", service.GoName, " service.") - g.P("// It's only intended for direct use with ", grpcPackage.Ident("RegisterService"), ",") - g.P("// and not to be introspected or modified (even as a copy)") - g.P("var ", serviceDescVar, " = ", grpcPackage.Ident("ServiceDesc"), " {") - g.P("ServiceName: ", strconv.Quote(string(service.Desc.FullName())), ",") - g.P("HandlerType: (*", serverType, ")(nil),") - g.P("Methods: []", grpcPackage.Ident("MethodDesc"), "{") - for i, method := range service.Methods { - if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { - continue - } - g.P("{") - g.P("MethodName: ", strconv.Quote(string(method.Desc.Name())), ",") - g.P("Handler: ", handlerNames[i], ",") - g.P("},") - } - g.P("},") - g.P("Streams: []", grpcPackage.Ident("StreamDesc"), "{") - for i, method := range service.Methods { - if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { - continue - } - g.P("{") - g.P("StreamName: ", strconv.Quote(string(method.Desc.Name())), ",") - g.P("Handler: ", handlerNames[i], ",") - if method.Desc.IsStreamingServer() { - g.P("ServerStreams: true,") - } - if method.Desc.IsStreamingClient() { - g.P("ClientStreams: true,") - } - g.P("},") - } - g.P("},") - g.P("Metadata: \"", file.Desc.Path(), "\",") - g.P("}") - g.P() + helper.generateServerFunctions(gen, file, g, service, serverType, serviceDescVar) } func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string { @@ -251,7 +303,7 @@ func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, index int) { service := method.Parent - sname := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) + fmSymbol := helper.formatFullMethodSymbol(service, method) if 
method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { g.P(deprecationComment) @@ -259,7 +311,7 @@ func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene g.P("func (c *", unexport(service.GoName), "Client) ", clientSignature(g, method), "{") if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { g.P("out := new(", method.Output.GoIdent, ")") - g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) + g.P(`err := c.cc.Invoke(ctx, `, fmSymbol, `, in, out, opts...)`) g.P("if err != nil { return nil, err }") g.P("return out, nil") g.P("}") @@ -268,7 +320,7 @@ func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene } streamType := unexport(service.GoName) + method.GoName + "Client" serviceDescVar := service.GoName + "_ServiceDesc" - g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], "`, sname, `", opts...)`) + g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], `, fmSymbol, `, opts...)`) g.P("if err != nil { return nil, err }") g.P("x := &", streamType, "{stream}") if !method.Desc.IsStreamingClient() { @@ -344,18 +396,60 @@ func serverSignature(g *protogen.GeneratedFile, method *protogen.Method) string return method.GoName + "(" + strings.Join(reqArgs, ", ") + ") " + ret } -func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method) string { +func genServiceDesc(file *protogen.File, g *protogen.GeneratedFile, serviceDescVar string, serverType string, service *protogen.Service, handlerNames []string) { + // Service descriptor. 
+ g.P("// ", serviceDescVar, " is the ", grpcPackage.Ident("ServiceDesc"), " for ", service.GoName, " service.") + g.P("// It's only intended for direct use with ", grpcPackage.Ident("RegisterService"), ",") + g.P("// and not to be introspected or modified (even as a copy)") + g.P("var ", serviceDescVar, " = ", grpcPackage.Ident("ServiceDesc"), " {") + g.P("ServiceName: ", strconv.Quote(string(service.Desc.FullName())), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPackage.Ident("MethodDesc"), "{") + for i, method := range service.Methods { + if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPackage.Ident("StreamDesc"), "{") + for i, method := range service.Methods { + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.Desc.IsStreamingServer() { + g.P("ServerStreams: true,") + } + if method.Desc.IsStreamingClient() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("Metadata: \"", file.Desc.Path(), "\",") + g.P("}") + g.P() +} + +func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, hnameFuncNameFormatter func(string) string) string { service := method.Parent hname := fmt.Sprintf("_%s_%s_Handler", service.GoName, method.GoName) if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { - g.P("func ", hname, "(srv interface{}, ctx ", contextPackage.Ident("Context"), ", dec func(interface{}) error, interceptor ", grpcPackage.Ident("UnaryServerInterceptor"), ") (interface{}, error) {") + g.P("func ", hnameFuncNameFormatter(hname), "(srv interface{}, ctx ", 
contextPackage.Ident("Context"), ", dec func(interface{}) error, interceptor ", grpcPackage.Ident("UnaryServerInterceptor"), ") (interface{}, error) {") g.P("in := new(", method.Input.GoIdent, ")") g.P("if err := dec(in); err != nil { return nil, err }") g.P("if interceptor == nil { return srv.(", service.GoName, "Server).", method.GoName, "(ctx, in) }") g.P("info := &", grpcPackage.Ident("UnaryServerInfo"), "{") g.P("Server: srv,") - g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name())), ",") + fmSymbol := helper.formatFullMethodSymbol(service, method) + g.P("FullMethod: ", fmSymbol, ",") g.P("}") g.P("handler := func(ctx ", contextPackage.Ident("Context"), ", req interface{}) (interface{}, error) {") g.P("return srv.(", service.GoName, "Server).", method.GoName, "(ctx, req.(*", method.Input.GoIdent, "))") @@ -366,7 +460,7 @@ func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene return hname } streamType := unexport(service.GoName) + method.GoName + "Server" - g.P("func ", hname, "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {") + g.P("func ", hnameFuncNameFormatter(hname), "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {") if !method.Desc.IsStreamingClient() { g.P("m := new(", method.Input.GoIdent, ")") g.P("if err := stream.RecvMsg(m); err != nil { return err }") @@ -425,6 +519,17 @@ func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.Gene return hname } +func genLeadingComments(g *protogen.GeneratedFile, loc protoreflect.SourceLocation) { + for _, s := range loc.LeadingDetachedComments { + g.P(protogen.Comments(s)) + g.P() + } + if s := loc.LeadingComments; s != "" { + g.P(protogen.Comments(s)) + g.P() + } +} + const deprecationComment = "// Deprecated: Do not use." 
func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } diff --git a/cmd/protoc-gen-go-grpc/main.go b/cmd/protoc-gen-go-grpc/main.go index 7f104da7d068..340eaf3ee7bf 100644 --- a/cmd/protoc-gen-go-grpc/main.go +++ b/cmd/protoc-gen-go-grpc/main.go @@ -19,14 +19,17 @@ // protoc-gen-go-grpc is a plugin for the Google protocol buffer compiler to // generate Go code. Install it by building this program and making it // accessible within your PATH with the name: +// // protoc-gen-go-grpc // // The 'go-grpc' suffix becomes part of the argument for the protocol compiler, // such that it can be invoked as: +// // protoc --go-grpc_out=. path/to/file.proto // // This generates Go service definitions for the protocol buffer defined by // file.proto. With that input, the output will be written to: +// // path/to/file_grpc.pb.go package main @@ -38,7 +41,7 @@ import ( "google.golang.org/protobuf/types/pluginpb" ) -const version = "1.1.0" +const version = "1.3.0" var requireUnimplemented *bool diff --git a/codes/code_string.go b/codes/code_string.go index 0b206a57822a..934fac2b090a 100644 --- a/codes/code_string.go +++ b/codes/code_string.go @@ -18,7 +18,15 @@ package codes -import "strconv" +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} func (c Code) String() string { switch c { @@ -60,3 +68,44 @@ func (c Code) String() string { return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return 
"FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/connectivity/connectivity.go b/connectivity/connectivity.go index 010156261505..4a89926422bc 100644 --- a/connectivity/connectivity.go +++ b/connectivity/connectivity.go @@ -18,7 +18,6 @@ // Package connectivity defines connectivity semantics. // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. -// All APIs in this package are experimental. package connectivity import ( @@ -45,7 +44,7 @@ func (s State) String() string { return "SHUTDOWN" default: logger.Errorf("unknown connectivity state: %d", s) - return "Invalid-State" + return "INVALID_STATE" } } @@ -61,3 +60,35 @@ const ( // Shutdown indicates the ClientConn has started shutting down. Shutdown ) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. +type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. 
+ ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/credentials/alts/alts_test.go b/credentials/alts/alts_test.go index cbb1656d20c3..20062fe77539 100644 --- a/credentials/alts/alts_test.go +++ b/credentials/alts/alts_test.go @@ -1,3 +1,4 @@ +//go:build linux || windows // +build linux windows /* @@ -21,18 +22,44 @@ package alts import ( + "context" "reflect" + "sync" "testing" + "time" "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/alts/internal/handshaker" + "google.golang.org/grpc/credentials/alts/internal/handshaker/service" + altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/credentials/alts/internal/testutil" "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/status" +) + +const ( + defaultTestLongTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond ) type s struct { grpctest.Tester } +func init() { + // The vmOnGCP global variable MUST be forced to true. Otherwise, if + // this test is run anywhere except on a GCP VM, then an ALTS handshake + // will immediately fail. 
+ once.Do(func() {}) + vmOnGCP = true +} + func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } @@ -286,6 +313,61 @@ func (s) TestCheckRPCVersions(t *testing.T) { } } +// TestFullHandshake performs a full ALTS handshake between a test client and +// server, where both client and server offload to a local, fake handshaker +// service. +func (s) TestFullHandshake(t *testing.T) { + // Start the fake handshaker service and the server. + var wait sync.WaitGroup + defer wait.Wait() + stopHandshaker, handshakerAddress := startFakeHandshakerService(t, &wait) + defer stopHandshaker() + stopServer, serverAddress := startServer(t, handshakerAddress, &wait) + defer stopServer() + + // Ping the server, authenticating with ALTS. + establishAltsConnection(t, handshakerAddress, serverAddress) + + // Close open connections to the fake handshaker service. + if err := service.CloseForTesting(); err != nil { + t.Errorf("service.CloseForTesting() failed: %v", err) + } +} + +// TestConcurrentHandshakes performs a several, concurrent ALTS handshakes +// between a test client and server, where both client and server offload to a +// local, fake handshaker service. +func (s) TestConcurrentHandshakes(t *testing.T) { + // Set the max number of concurrent handshakes to 3, so that we can + // test the handshaker behavior when handshakes are queued by + // performing more than 3 concurrent handshakes (specifically, 10). + handshaker.ResetConcurrentHandshakeSemaphoreForTesting(3) + + // Start the fake handshaker service and the server. + var wait sync.WaitGroup + defer wait.Wait() + stopHandshaker, handshakerAddress := startFakeHandshakerService(t, &wait) + defer stopHandshaker() + stopServer, serverAddress := startServer(t, handshakerAddress, &wait) + defer stopServer() + + // Ping the server, authenticating with ALTS. 
+ var waitForConnections sync.WaitGroup + for i := 0; i < 10; i++ { + waitForConnections.Add(1) + go func() { + establishAltsConnection(t, handshakerAddress, serverAddress) + waitForConnections.Done() + }() + } + waitForConnections.Wait() + + // Close open connections to the fake handshaker service. + if err := service.CloseForTesting(); err != nil { + t.Errorf("service.CloseForTesting() failed: %v", err) + } +} + func version(major, minor uint32) *altspb.RpcProtocolVersions_Version { return &altspb.RpcProtocolVersions_Version{ Major: major, @@ -299,3 +381,72 @@ func versions(minMajor, minMinor, maxMajor, maxMinor uint32) *altspb.RpcProtocol MaxRpcVersion: version(maxMajor, maxMinor), } } + +func establishAltsConnection(t *testing.T, handshakerAddress, serverAddress string) { + clientCreds := NewClientCreds(&ClientOptions{HandshakerServiceAddress: handshakerAddress}) + conn, err := grpc.Dial(serverAddress, grpc.WithTransportCredentials(clientCreds)) + if err != nil { + t.Fatalf("grpc.Dial(%v) failed: %v", serverAddress, err) + } + defer conn.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestLongTimeout) + defer cancel() + c := testgrpc.NewTestServiceClient(conn) + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + _, err = c.UnaryCall(ctx, &testpb.SimpleRequest{}) + if err == nil { + break + } + if code := status.Code(err); code == codes.Unavailable { + // The server is not ready yet. Try again. 
+ continue + } + t.Fatalf("c.UnaryCall() failed: %v", err) + } +} + +func startFakeHandshakerService(t *testing.T, wait *sync.WaitGroup) (stop func(), address string) { + listener, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("LocalTCPListener() failed: %v", err) + } + s := grpc.NewServer() + altsgrpc.RegisterHandshakerServiceServer(s, &testutil.FakeHandshaker{}) + wait.Add(1) + go func() { + defer wait.Done() + if err := s.Serve(listener); err != nil { + t.Errorf("failed to serve: %v", err) + } + }() + return func() { s.Stop() }, listener.Addr().String() +} + +func startServer(t *testing.T, handshakerServiceAddress string, wait *sync.WaitGroup) (stop func(), address string) { + listener, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("LocalTCPListener() failed: %v", err) + } + serverOpts := &ServerOptions{HandshakerServiceAddress: handshakerServiceAddress} + creds := NewServerCreds(serverOpts) + s := grpc.NewServer(grpc.Creds(creds)) + testgrpc.RegisterTestServiceServer(s, &testServer{}) + wait.Add(1) + go func() { + defer wait.Done() + if err := s.Serve(listener); err != nil { + t.Errorf("s.Serve(%v) failed: %v", listener, err) + } + }() + return func() { s.Stop() }, listener.Addr().String() +} + +type testServer struct { + testgrpc.UnimplementedTestServiceServer +} + +func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{ + Payload: &testpb.Payload{}, + }, nil +} diff --git a/credentials/alts/internal/conn/record_test.go b/credentials/alts/internal/conn/record_test.go index 59d4f41e9e1c..0b4177a581ed 100644 --- a/credentials/alts/internal/conn/record_test.go +++ b/credentials/alts/internal/conn/record_test.go @@ -40,11 +40,15 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } +const ( + rekeyRecordProtocol = "ALTSRP_GCM_AES128_REKEY" +) + var ( - nextProtocols = []string{"ALTSRP_GCM_AES128"} + recordProtocols = 
[]string{rekeyRecordProtocol} altsRecordFuncs = map[string]ALTSRecordFunc{ // ALTS handshaker protocols. - "ALTSRP_GCM_AES128": func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) { + rekeyRecordProtocol: func(s core.Side, keyData []byte) (ALTSRecordCrypto, error) { return NewAES128GCM(s, keyData) }, } @@ -77,7 +81,7 @@ func (c *testConn) Close() error { return nil } -func newTestALTSRecordConn(in, out *bytes.Buffer, side core.Side, np string, protected []byte) *conn { +func newTestALTSRecordConn(in, out *bytes.Buffer, side core.Side, rp string, protected []byte) *conn { key := []byte{ // 16 arbitrary bytes. 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x4f, 0x49} @@ -85,23 +89,23 @@ func newTestALTSRecordConn(in, out *bytes.Buffer, side core.Side, np string, pro in: in, out: out, } - c, err := NewConn(&tc, side, np, key, protected) + c, err := NewConn(&tc, side, rp, key, protected) if err != nil { panic(fmt.Sprintf("Unexpected error creating test ALTS record connection: %v", err)) } return c.(*conn) } -func newConnPair(np string, clientProtected []byte, serverProtected []byte) (client, server *conn) { +func newConnPair(rp string, clientProtected []byte, serverProtected []byte) (client, server *conn) { clientBuf := new(bytes.Buffer) serverBuf := new(bytes.Buffer) - clientConn := newTestALTSRecordConn(clientBuf, serverBuf, core.ClientSide, np, clientProtected) - serverConn := newTestALTSRecordConn(serverBuf, clientBuf, core.ServerSide, np, serverProtected) + clientConn := newTestALTSRecordConn(clientBuf, serverBuf, core.ClientSide, rp, clientProtected) + serverConn := newTestALTSRecordConn(serverBuf, clientBuf, core.ServerSide, rp, serverProtected) return clientConn, serverConn } -func testPingPong(t *testing.T, np string) { - clientConn, serverConn := newConnPair(np, nil, nil) +func testPingPong(t *testing.T, rp string) { + clientConn, serverConn := newConnPair(rp, nil, nil) clientMsg := []byte("Client Message") if n, 
err := clientConn.Write(clientMsg); n != len(clientMsg) || err != nil { t.Fatalf("Client Write() = %v, %v; want %v, ", n, err, len(clientMsg)) @@ -128,13 +132,13 @@ func testPingPong(t *testing.T, np string) { } func (s) TestPingPong(t *testing.T) { - for _, np := range nextProtocols { - testPingPong(t, np) + for _, rp := range recordProtocols { + testPingPong(t, rp) } } -func testSmallReadBuffer(t *testing.T, np string) { - clientConn, serverConn := newConnPair(np, nil, nil) +func testSmallReadBuffer(t *testing.T, rp string) { + clientConn, serverConn := newConnPair(rp, nil, nil) msg := []byte("Very Important Message") if n, err := clientConn.Write(msg); err != nil { t.Fatalf("Write() = %v, %v; want %v, ", n, err, len(msg)) @@ -155,13 +159,13 @@ func testSmallReadBuffer(t *testing.T, np string) { } func (s) TestSmallReadBuffer(t *testing.T) { - for _, np := range nextProtocols { - testSmallReadBuffer(t, np) + for _, rp := range recordProtocols { + testSmallReadBuffer(t, rp) } } -func testLargeMsg(t *testing.T, np string) { - clientConn, serverConn := newConnPair(np, nil, nil) +func testLargeMsg(t *testing.T, rp string) { + clientConn, serverConn := newConnPair(rp, nil, nil) // msgLen is such that the length in the framing is larger than the // default size of one frame. msgLen := altsRecordDefaultLength - msgTypeFieldSize - clientConn.crypto.EncryptionOverhead() + 1 @@ -179,12 +183,12 @@ func testLargeMsg(t *testing.T, np string) { } func (s) TestLargeMsg(t *testing.T) { - for _, np := range nextProtocols { - testLargeMsg(t, np) + for _, rp := range recordProtocols { + testLargeMsg(t, rp) } } -func testIncorrectMsgType(t *testing.T, np string) { +func testIncorrectMsgType(t *testing.T, rp string) { // framedMsg is an empty ciphertext with correct framing but wrong // message type. 
framedMsg := make([]byte, MsgLenFieldSize+msgTypeFieldSize) @@ -193,7 +197,7 @@ func testIncorrectMsgType(t *testing.T, np string) { binary.LittleEndian.PutUint32(framedMsg[MsgLenFieldSize:], wrongMsgType) in := bytes.NewBuffer(framedMsg) - c := newTestALTSRecordConn(in, nil, core.ClientSide, np, nil) + c := newTestALTSRecordConn(in, nil, core.ClientSide, rp, nil) b := make([]byte, 1) if n, err := c.Read(b); n != 0 || err == nil { t.Fatalf("Read() = , want %v", fmt.Errorf("received frame with incorrect message type %v", wrongMsgType)) @@ -201,22 +205,22 @@ func testIncorrectMsgType(t *testing.T, np string) { } func (s) TestIncorrectMsgType(t *testing.T) { - for _, np := range nextProtocols { - testIncorrectMsgType(t, np) + for _, rp := range recordProtocols { + testIncorrectMsgType(t, rp) } } -func testFrameTooLarge(t *testing.T, np string) { +func testFrameTooLarge(t *testing.T, rp string) { buf := new(bytes.Buffer) - clientConn := newTestALTSRecordConn(nil, buf, core.ClientSide, np, nil) - serverConn := newTestALTSRecordConn(buf, nil, core.ServerSide, np, nil) + clientConn := newTestALTSRecordConn(nil, buf, core.ClientSide, rp, nil) + serverConn := newTestALTSRecordConn(buf, nil, core.ServerSide, rp, nil) // payloadLen is such that the length in the framing is larger than // allowed in one frame. 
payloadLen := altsRecordLengthLimit - msgTypeFieldSize - clientConn.crypto.EncryptionOverhead() + 1 payload := make([]byte, payloadLen) c, err := clientConn.crypto.Encrypt(nil, payload) if err != nil { - t.Fatalf(fmt.Sprintf("Error encrypting message: %v", err)) + t.Fatalf("Error encrypting message: %v", err) } msgLen := msgTypeFieldSize + len(c) framedMsg := make([]byte, MsgLenFieldSize+msgLen) @@ -225,7 +229,7 @@ func testFrameTooLarge(t *testing.T, np string) { binary.LittleEndian.PutUint32(msg[:msgTypeFieldSize], altsRecordMsgType) copy(msg[msgTypeFieldSize:], c) if _, err = buf.Write(framedMsg); err != nil { - t.Fatal(fmt.Sprintf("Unexpected error writing to buffer: %v", err)) + t.Fatalf("Unexpected error writing to buffer: %v", err) } b := make([]byte, 1) if n, err := serverConn.Read(b); n != 0 || err == nil { @@ -234,15 +238,15 @@ func testFrameTooLarge(t *testing.T, np string) { } func (s) TestFrameTooLarge(t *testing.T) { - for _, np := range nextProtocols { - testFrameTooLarge(t, np) + for _, rp := range recordProtocols { + testFrameTooLarge(t, rp) } } -func testWriteLargeData(t *testing.T, np string) { +func testWriteLargeData(t *testing.T, rp string) { // Test sending and receiving messages larger than the maximum write // buffer size. - clientConn, serverConn := newConnPair(np, nil, nil) + clientConn, serverConn := newConnPair(rp, nil, nil) // Message size is intentionally chosen to not be multiple of // payloadLengthLimtit. msgSize := altsWriteBufferMaxSize + (100 * 1024) @@ -277,25 +281,25 @@ func testWriteLargeData(t *testing.T, np string) { } func (s) TestWriteLargeData(t *testing.T) { - for _, np := range nextProtocols { - testWriteLargeData(t, np) + for _, rp := range recordProtocols { + testWriteLargeData(t, rp) } } -func testProtectedBuffer(t *testing.T, np string) { +func testProtectedBuffer(t *testing.T, rp string) { key := []byte{ // 16 arbitrary bytes. 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x4f, 0x49} // Encrypt a message to be passed to NewConn as a client-side protected // buffer. - newCrypto := protocols[np] + newCrypto := protocols[rp] if newCrypto == nil { - t.Fatalf("Unknown next protocol %q", np) + t.Fatalf("Unknown record protocol %q", rp) } crypto, err := newCrypto(core.ClientSide, key) if err != nil { - t.Fatalf("Failed to create a crypter for protocol %q: %v", np, err) + t.Fatalf("Failed to create a crypter for protocol %q: %v", rp, err) } msg := []byte("Client Protected Message") encryptedMsg, err := crypto.Encrypt(nil, msg) @@ -307,7 +311,7 @@ func testProtectedBuffer(t *testing.T, np string) { binary.LittleEndian.PutUint32(protectedMsg[4:], altsRecordMsgType) protectedMsg = append(protectedMsg, encryptedMsg...) - _, serverConn := newConnPair(np, nil, protectedMsg) + _, serverConn := newConnPair(rp, nil, protectedMsg) rcvClientMsg := make([]byte, len(msg)) if n, err := serverConn.Read(rcvClientMsg); n != len(rcvClientMsg) || err != nil { t.Fatalf("Server Read() = %v, %v; want %v, ", n, err, len(rcvClientMsg)) @@ -318,7 +322,7 @@ func testProtectedBuffer(t *testing.T, np string) { } func (s) TestProtectedBuffer(t *testing.T) { - for _, np := range nextProtocols { - testProtectedBuffer(t, np) + for _, rp := range recordProtocols { + testProtectedBuffer(t, rp) } } diff --git a/credentials/alts/internal/handshaker/handshaker.go b/credentials/alts/internal/handshaker/handshaker.go index 8bc7ceee0aff..0854e7af6518 100644 --- a/credentials/alts/internal/handshaker/handshaker.go +++ b/credentials/alts/internal/handshaker/handshaker.go @@ -25,8 +25,8 @@ import ( "fmt" "io" "net" - "sync" + "golang.org/x/sync/semaphore" grpc "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -35,15 +35,13 @@ import ( "google.golang.org/grpc/credentials/alts/internal/conn" altsgrpc 
"google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + "google.golang.org/grpc/internal/envconfig" ) const ( // The maximum byte size of receive frames. frameLimit = 64 * 1024 // 64 KB rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY" - // maxPendingHandshakes represents the maximum number of concurrent - // handshakes. - maxPendingHandshakes = 100 ) var ( @@ -59,9 +57,9 @@ var ( return conn.NewAES128GCMRekey(s, keyData) }, } - // control number of concurrent created (but not closed) handshakers. - mu sync.Mutex - concurrentHandshakes = int64(0) + // control number of concurrent created (but not closed) handshakes. + clientHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) + serverHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes)) // errDropped occurs when maxPendingHandshakes is reached. errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached") // errOutOfBound occurs when the handshake service returns a consumed @@ -77,30 +75,6 @@ func init() { } } -func acquire() bool { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - success := maxPendingHandshakes-concurrentHandshakes >= n - if success { - concurrentHandshakes += n - } - mu.Unlock() - return success -} - -func release() { - mu.Lock() - // If we need n to be configurable, we can pass it as an argument. - n := int64(1) - concurrentHandshakes -= n - if concurrentHandshakes < 0 { - mu.Unlock() - panic("bad release") - } - mu.Unlock() -} - // ClientHandshakerOptions contains the client handshaker options that can // provided by the caller. 
type ClientHandshakerOptions struct { @@ -134,11 +108,7 @@ func DefaultServerHandshakerOptions() *ServerHandshakerOptions { return &ServerHandshakerOptions{} } -// TODO: add support for future local and remote endpoint in both client options -// and server options (server options struct does not exist now. When -// caller can provide endpoints, it should be created. - -// altsHandshaker is used to complete a ALTS handshaking between client and +// altsHandshaker is used to complete an ALTS handshake between client and // server. This handshaker talks to the ALTS handshaker service in the metadata // server. type altsHandshaker struct { @@ -146,6 +116,8 @@ type altsHandshaker struct { stream altsgrpc.HandshakerService_DoHandshakeClient // the connection to the peer. conn net.Conn + // a virtual connection to the ALTS handshaker service. + clientConn *grpc.ClientConn // client handshake options. clientOpts *ClientHandshakerOptions // server handshake options. @@ -154,50 +126,54 @@ type altsHandshaker struct { side core.Side } -// NewClientHandshaker creates a ALTS handshaker for GCP which contains an RPC -// stub created using the passed conn and used to talk to the ALTS Handshaker +// NewClientHandshaker creates a core.Handshaker that performs a client-side +// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. 
func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) - if err != nil { - return nil, err - } return &altsHandshaker{ - stream: stream, + stream: nil, conn: c, + clientConn: conn, clientOpts: opts, side: core.ClientSide, }, nil } -// NewServerHandshaker creates a ALTS handshaker for GCP which contains an RPC -// stub created using the passed conn and used to talk to the ALTS Handshaker +// NewServerHandshaker creates a core.Handshaker that performs a server-side +// ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { - stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true)) - if err != nil { - return nil, err - } return &altsHandshaker{ - stream: stream, + stream: nil, conn: c, + clientConn: conn, serverOpts: opts, side: core.ServerSide, }, nil } -// ClientHandshake starts and completes a client ALTS handshaking for GCP. Once +// ClientHandshake starts and completes a client ALTS handshake for GCP. Once // done, ClientHandshake returns a secure connection. func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { + if !clientHandshakes.TryAcquire(1) { return nil, nil, errDropped } - defer release() + defer clientHandshakes.Release(1) if h.side != core.ClientSide { return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshaker") } + // TODO(matthewstevenson88): Change unit tests to use public APIs so + // that h.stream can unconditionally be set based on h.clientConn. 
+ if h.stream == nil { + stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) + } + h.stream = stream + } + // Create target identities from service account list. targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts)) for _, account := range h.clientOpts.TargetServiceAccounts { @@ -229,18 +205,28 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent return conn, authInfo, nil } -// ServerHandshake starts and completes a server ALTS handshaking for GCP. Once +// ServerHandshake starts and completes a server ALTS handshake for GCP. Once // done, ServerHandshake returns a secure connection. func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) { - if !acquire() { + if !serverHandshakes.TryAcquire(1) { return nil, nil, errDropped } - defer release() + defer serverHandshakes.Release(1) if h.side != core.ServerSide { return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshaker") } + // TODO(matthewstevenson88): Change unit tests to use public APIs so + // that h.stream can unconditionally be set based on h.clientConn. + if h.stream == nil { + stream, err := altsgrpc.NewHandshakerServiceClient(h.clientConn).DoHandshake(ctx) + if err != nil { + return nil, nil, fmt.Errorf("failed to establish stream to ALTS handshaker service: %v", err) + } + h.stream = stream + } + p := make([]byte, frameLimit) n, err := h.conn.Read(p) if err != nil { @@ -248,8 +234,6 @@ func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credent } // Prepare server parameters. - // TODO: currently only ALTS parameters are provided. Might need to use - // more options in the future. 
params := make(map[int32]*altspb.ServerHandshakeParameters) params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{ RecordProtocols: recordProtocols, @@ -371,5 +355,14 @@ func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []b // Close terminates the Handshaker. It should be called when the caller obtains // the secure connection. func (h *altsHandshaker) Close() { - h.stream.CloseSend() + if h.stream != nil { + h.stream.CloseSend() + } +} + +// ResetConcurrentHandshakeSemaphoreForTesting resets the handshake semaphores +// to allow numberOfAllowedHandshakes concurrent handshakes each. +func ResetConcurrentHandshakeSemaphoreForTesting(numberOfAllowedHandshakes int64) { + clientHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) + serverHandshakes = semaphore.NewWeighted(numberOfAllowedHandshakes) } diff --git a/credentials/alts/internal/handshaker/handshaker_test.go b/credentials/alts/internal/handshaker/handshaker_test.go index bf516dc53c87..40d66161c7b6 100644 --- a/credentials/alts/internal/handshaker/handshaker_test.go +++ b/credentials/alts/internal/handshaker/handshaker_test.go @@ -21,13 +21,17 @@ package handshaker import ( "bytes" "context" + "errors" "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" grpc "google.golang.org/grpc" core "google.golang.org/grpc/credentials/alts/internal" altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" "google.golang.org/grpc/credentials/alts/internal/testutil" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpctest" ) @@ -131,7 +135,7 @@ func (s) TestClientHandshake(t *testing.T) { numberOfHandshakes int }{ {0 * time.Millisecond, 1}, - {100 * time.Millisecond, 10 * maxPendingHandshakes}, + {100 * time.Millisecond, 10 * int(envconfig.ALTSMaxConcurrentHandshakes)}, } { errc := make(chan error) stat.Reset() @@ -163,7 +167,8 @@ func (s) 
TestClientHandshake(t *testing.T) { go func() { _, context, err := chs.ClientHandshake(ctx) if err == nil && context == nil { - panic("expected non-nil ALTS context") + errc <- errors.New("expected non-nil ALTS context") + return } errc <- err chs.Close() @@ -178,8 +183,8 @@ func (s) TestClientHandshake(t *testing.T) { } // Ensure that there are no concurrent calls more than the limit. - if stat.MaxConcurrentCalls > maxPendingHandshakes { - t.Errorf("Observed %d concurrent handshakes; want <= %d", stat.MaxConcurrentCalls, maxPendingHandshakes) + if stat.MaxConcurrentCalls > int(envconfig.ALTSMaxConcurrentHandshakes) { + t.Errorf("Observed %d concurrent handshakes; want <= %d", stat.MaxConcurrentCalls, envconfig.ALTSMaxConcurrentHandshakes) } } } @@ -190,7 +195,7 @@ func (s) TestServerHandshake(t *testing.T) { numberOfHandshakes int }{ {0 * time.Millisecond, 1}, - {100 * time.Millisecond, 10 * maxPendingHandshakes}, + {100 * time.Millisecond, 10 * int(envconfig.ALTSMaxConcurrentHandshakes)}, } { errc := make(chan error) stat.Reset() @@ -219,7 +224,8 @@ func (s) TestServerHandshake(t *testing.T) { go func() { _, context, err := shs.ServerHandshake(ctx) if err == nil && context == nil { - panic("expected non-nil ALTS context") + errc <- errors.New("expected non-nil ALTS context") + return } errc <- err shs.Close() @@ -234,8 +240,8 @@ func (s) TestServerHandshake(t *testing.T) { } // Ensure that there are no concurrent calls more than the limit. 
- if stat.MaxConcurrentCalls > maxPendingHandshakes { - t.Errorf("Observed %d concurrent handshakes; want <= %d", stat.MaxConcurrentCalls, maxPendingHandshakes) + if stat.MaxConcurrentCalls > int(envconfig.ALTSMaxConcurrentHandshakes) { + t.Errorf("Observed %d concurrent handshakes; want <= %d", stat.MaxConcurrentCalls, envconfig.ALTSMaxConcurrentHandshakes) } } } @@ -280,3 +286,67 @@ func (s) TestPeerNotResponding(t *testing.T) { t.Errorf("ClientHandshake() = %v, want %v", got, want) } } + +func (s) TestNewClientHandshaker(t *testing.T) { + conn := testutil.NewTestConn(nil, nil) + clientConn := &grpc.ClientConn{} + opts := &ClientHandshakerOptions{} + hs, err := NewClientHandshaker(context.Background(), clientConn, conn, opts) + if err != nil { + t.Errorf("NewClientHandshaker returned unexpected error: %v", err) + } + expectedHs := &altsHandshaker{ + stream: nil, + conn: conn, + clientConn: clientConn, + clientOpts: opts, + serverOpts: nil, + side: core.ClientSide, + } + cmpOpts := []cmp.Option{ + cmp.AllowUnexported(altsHandshaker{}), + cmpopts.IgnoreFields(altsHandshaker{}, "conn", "clientConn"), + } + if got, want := hs.(*altsHandshaker), expectedHs; !cmp.Equal(got, want, cmpOpts...) 
{ + t.Errorf("NewClientHandshaker() returned unexpected handshaker: got: %v, want: %v", got, want) + } + if hs.(*altsHandshaker).stream != nil { + t.Errorf("NewClientHandshaker() returned handshaker with non-nil stream") + } + if hs.(*altsHandshaker).clientConn != clientConn { + t.Errorf("NewClientHandshaker() returned handshaker with unexpected clientConn") + } + hs.Close() +} + +func (s) TestNewServerHandshaker(t *testing.T) { + conn := testutil.NewTestConn(nil, nil) + clientConn := &grpc.ClientConn{} + opts := &ServerHandshakerOptions{} + hs, err := NewServerHandshaker(context.Background(), clientConn, conn, opts) + if err != nil { + t.Errorf("NewServerHandshaker returned unexpected error: %v", err) + } + expectedHs := &altsHandshaker{ + stream: nil, + conn: conn, + clientConn: clientConn, + clientOpts: nil, + serverOpts: opts, + side: core.ServerSide, + } + cmpOpts := []cmp.Option{ + cmp.AllowUnexported(altsHandshaker{}), + cmpopts.IgnoreFields(altsHandshaker{}, "conn", "clientConn"), + } + if got, want := hs.(*altsHandshaker), expectedHs; !cmp.Equal(got, want, cmpOpts...) 
{ + t.Errorf("NewServerHandshaker() returned unexpected handshaker: got: %v, want: %v", got, want) + } + if hs.(*altsHandshaker).stream != nil { + t.Errorf("NewServerHandshaker() returned handshaker with non-nil stream") + } + if hs.(*altsHandshaker).clientConn != clientConn { + t.Errorf("NewServerHandshaker() returned handshaker with unexpected clientConn") + } + hs.Close() +} diff --git a/credentials/alts/internal/handshaker/service/service.go b/credentials/alts/internal/handshaker/service/service.go index 77d759cd956f..e1cdafb980cd 100644 --- a/credentials/alts/internal/handshaker/service/service.go +++ b/credentials/alts/internal/handshaker/service/service.go @@ -24,6 +24,7 @@ import ( "sync" grpc "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ) var ( @@ -49,7 +50,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error - hsConn, err = hsDialer(hsAddress, grpc.WithInsecure()) + hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } @@ -57,3 +58,21 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { } return hsConn, nil } + +// CloseForTesting closes all open connections to the handshaker service. +// +// For testing purposes only. +func CloseForTesting() error { + for _, hsConn := range hsConnMap { + if hsConn == nil { + continue + } + if err := hsConn.Close(); err != nil { + return err + } + } + + // Reset the connection map. 
+ hsConnMap = make(map[string]*grpc.ClientConn) + return nil +} diff --git a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 703b48da753b..83e3bae37b17 100644 --- a/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/gcp/altscontext.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type AltsContext struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 40570e9bf2de..0b0093328bff 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/gcp/handshaker.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HandshakeProtocol int32 const ( @@ -216,6 +211,7 @@ type Identity struct { unknownFields protoimpl.UnknownFields // Types that are assignable to IdentityOneof: + // // *Identity_ServiceAccount // *Identity_Hostname IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` @@ -664,6 +660,7 @@ type HandshakerReq struct { unknownFields protoimpl.UnknownFields // Types that are assignable to ReqOneof: + // // *HandshakerReq_ClientStart // *HandshakerReq_ServerStart // *HandshakerReq_Next diff --git a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index efdbd13fa304..39ecccf878ee 100644 --- a/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,4 +1,25 @@ +// Copyright 2018 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/gcp/handshaker.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/gcp/handshaker.proto package grpc_gcp @@ -14,6 +35,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + HandshakerService_DoHandshake_FullMethodName = "/grpc.gcp.HandshakerService/DoHandshake" +) + // HandshakerServiceClient is the client API for HandshakerService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -36,7 +61,7 @@ func NewHandshakerServiceClient(cc grpc.ClientConnInterface) HandshakerServiceCl } func (c *handshakerServiceClient) DoHandshake(ctx context.Context, opts ...grpc.CallOption) (HandshakerService_DoHandshakeClient, error) { - stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], "/grpc.gcp.HandshakerService/DoHandshake", opts...) + stream, err := c.cc.NewStream(ctx, &HandshakerService_ServiceDesc.Streams[0], HandshakerService_DoHandshake_FullMethodName, opts...) if err != nil { return nil, err } diff --git a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 4fc3c79d6a39..c2e564c7ded4 100644 --- a/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/gcp/transport_security_common.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The security level of the created channel. The list is sorted in increasing // level of security. This order must always be maintained. type SecurityLevel int32 diff --git a/credentials/alts/internal/testutil/testutil.go b/credentials/alts/internal/testutil/testutil.go index e114719d5a88..cdc88c8f9da0 100644 --- a/credentials/alts/internal/testutil/testutil.go +++ b/credentials/alts/internal/testutil/testutil.go @@ -22,11 +22,15 @@ package testutil import ( "bytes" "encoding/binary" + "fmt" "io" "net" "sync" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/alts/internal/conn" + altsgrpc "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" + altspb "google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp" ) // Stats is used to collect statistics about concurrent handshake calls. @@ -123,3 +127,178 @@ func MakeFrame(pl string) []byte { copy(f[conn.MsgLenFieldSize:], []byte(pl)) return f } + +// FakeHandshaker is a fake implementation of the ALTS handshaker service. +type FakeHandshaker struct { + altsgrpc.HandshakerServiceServer +} + +// DoHandshake performs a fake ALTS handshake. 
+func (h *FakeHandshaker) DoHandshake(stream altsgrpc.HandshakerService_DoHandshakeServer) error { + var isAssistingClient bool + var handshakeFramesReceivedSoFar []byte + for { + req, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("stream recv failure: %v", err) + } + var resp *altspb.HandshakerResp + switch req := req.ReqOneof.(type) { + case *altspb.HandshakerReq_ClientStart: + isAssistingClient = true + resp, err = h.processStartClient(req.ClientStart) + if err != nil { + return fmt.Errorf("processStartClient failure: %v", err) + } + case *altspb.HandshakerReq_ServerStart: + // If we have received the full ClientInit, send the ServerInit and + // ServerFinished. Otherwise, wait for more bytes to arrive from the client. + isAssistingClient = false + handshakeFramesReceivedSoFar = append(handshakeFramesReceivedSoFar, req.ServerStart.InBytes...) + sendHandshakeFrame := bytes.Equal(handshakeFramesReceivedSoFar, []byte("ClientInit")) + resp, err = h.processServerStart(req.ServerStart, sendHandshakeFrame) + if err != nil { + return fmt.Errorf("processServerStart failure: %v", err) + } + case *altspb.HandshakerReq_Next: + // If we have received all handshake frames, send the handshake result. + // Otherwise, wait for more bytes to arrive from the peer. + oldHandshakesBytes := len(handshakeFramesReceivedSoFar) + handshakeFramesReceivedSoFar = append(handshakeFramesReceivedSoFar, req.Next.InBytes...) 
+ isHandshakeComplete := false + if isAssistingClient { + isHandshakeComplete = bytes.HasPrefix(handshakeFramesReceivedSoFar, []byte("ServerInitServerFinished")) + } else { + isHandshakeComplete = bytes.HasPrefix(handshakeFramesReceivedSoFar, []byte("ClientInitClientFinished")) + } + if !isHandshakeComplete { + resp = &altspb.HandshakerResp{ + BytesConsumed: uint32(len(handshakeFramesReceivedSoFar) - oldHandshakesBytes), + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + } + break + } + resp, err = h.getHandshakeResult(isAssistingClient) + if err != nil { + return fmt.Errorf("getHandshakeResult failure: %v", err) + } + default: + return fmt.Errorf("handshake request has unexpected type: %v", req) + } + + if err = stream.Send(resp); err != nil { + return fmt.Errorf("stream send failure: %v", err) + } + } +} + +func (h *FakeHandshaker) processStartClient(req *altspb.StartClientHandshakeReq) (*altspb.HandshakerResp, error) { + if req.HandshakeSecurityProtocol != altspb.HandshakeProtocol_ALTS { + return nil, fmt.Errorf("unexpected handshake security protocol: %v", req.HandshakeSecurityProtocol) + } + if len(req.ApplicationProtocols) != 1 || req.ApplicationProtocols[0] != "grpc" { + return nil, fmt.Errorf("unexpected application protocols: %v", req.ApplicationProtocols) + } + if len(req.RecordProtocols) != 1 || req.RecordProtocols[0] != "ALTSRP_GCM_AES128_REKEY" { + return nil, fmt.Errorf("unexpected record protocols: %v", req.RecordProtocols) + } + return &altspb.HandshakerResp{ + OutFrames: []byte("ClientInit"), + BytesConsumed: 0, + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + }, nil +} + +func (h *FakeHandshaker) processServerStart(req *altspb.StartServerHandshakeReq, sendHandshakeFrame bool) (*altspb.HandshakerResp, error) { + if len(req.ApplicationProtocols) != 1 || req.ApplicationProtocols[0] != "grpc" { + return nil, fmt.Errorf("unexpected application protocols: %v", req.ApplicationProtocols) + } + parameters, ok := 
req.GetHandshakeParameters()[int32(altspb.HandshakeProtocol_ALTS)] + if !ok { + return nil, fmt.Errorf("missing ALTS handshake parameters") + } + if len(parameters.RecordProtocols) != 1 || parameters.RecordProtocols[0] != "ALTSRP_GCM_AES128_REKEY" { + return nil, fmt.Errorf("unexpected record protocols: %v", parameters.RecordProtocols) + } + if sendHandshakeFrame { + return &altspb.HandshakerResp{ + OutFrames: []byte("ServerInitServerFinished"), + BytesConsumed: uint32(len(req.InBytes)), + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + }, nil + } + return &altspb.HandshakerResp{ + OutFrames: []byte("ServerInitServerFinished"), + BytesConsumed: 10, + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + }, nil +} + +func (h *FakeHandshaker) getHandshakeResult(isAssistingClient bool) (*altspb.HandshakerResp, error) { + if isAssistingClient { + return &altspb.HandshakerResp{ + OutFrames: []byte("ClientFinished"), + BytesConsumed: 24, + Result: &altspb.HandshakerResult{ + ApplicationProtocol: "grpc", + RecordProtocol: "ALTSRP_GCM_AES128_REKEY", + KeyData: []byte("negotiated-key-data-for-altsrp-gcm-aes128-rekey"), + PeerIdentity: &altspb.Identity{ + IdentityOneof: &altspb.Identity_ServiceAccount{ + ServiceAccount: "server@bar.com", + }, + }, + PeerRpcVersions: &altspb.RpcProtocolVersions{ + MaxRpcVersion: &altspb.RpcProtocolVersions_Version{ + Minor: 1, + Major: 2, + }, + MinRpcVersion: &altspb.RpcProtocolVersions_Version{ + Minor: 1, + Major: 2, + }, + }, + }, + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + }, nil + } + return &altspb.HandshakerResp{ + BytesConsumed: 14, + Result: &altspb.HandshakerResult{ + ApplicationProtocol: "grpc", + RecordProtocol: "ALTSRP_GCM_AES128_REKEY", + KeyData: []byte("negotiated-key-data-for-altsrp-gcm-aes128-rekey"), + PeerIdentity: &altspb.Identity{ + IdentityOneof: &altspb.Identity_ServiceAccount{ + ServiceAccount: "client@baz.com", + }, + }, + PeerRpcVersions: 
&altspb.RpcProtocolVersions{ + MaxRpcVersion: &altspb.RpcProtocolVersions_Version{ + Minor: 1, + Major: 2, + }, + MinRpcVersion: &altspb.RpcProtocolVersions_Version{ + Minor: 1, + Major: 2, + }, + }, + }, + Status: &altspb.HandshakerStatus{ + Code: uint32(codes.OK), + }, + }, nil +} diff --git a/credentials/alts/utils_test.go b/credentials/alts/utils_test.go index 2231de3dccc4..531cdfce6e3a 100644 --- a/credentials/alts/utils_test.go +++ b/credentials/alts/utils_test.go @@ -1,3 +1,4 @@ +//go:build linux || windows // +build linux windows /* diff --git a/credentials/credentials.go b/credentials/credentials.go index e69562e78786..5feac3aa0e41 100644 --- a/credentials/credentials.go +++ b/credentials/credentials.go @@ -30,22 +30,22 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc/attributes" - "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" ) // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. Additionally, RequestInfo data will be - // available via ctx to this call. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. 
If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. @@ -140,6 +140,11 @@ type TransportCredentials interface { // Additionally, ClientHandshakeInfo data will be available via the context // passed to this call. // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns @@ -153,9 +158,13 @@ type TransportCredentials interface { Info() ProtocolInfo // Clone makes a copy of this TransportCredentials. Clone() TransportCredentials - // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. - // gRPC internals also use it to override the virtual hosting name if it is set. - // It must be called before dialing. Currently, this is only used by grpclb. 
+ // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. OverrideServerName(string) error } @@ -169,8 +178,18 @@ type TransportCredentials interface { // // This API is experimental. type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. PerRPCCredentials() PerRPCCredentials + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. // @@ -188,15 +207,12 @@ type RequestInfo struct { AuthInfo AuthInfo } -// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. -type requestInfoKey struct{} - // RequestInfoFromContext extracts the RequestInfo from the context if it exists. // // This API is experimental. func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { - ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) - return + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok } // ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes @@ -211,16 +227,12 @@ type ClientHandshakeInfo struct { Attributes *attributes.Attributes } -// clientHandshakeInfoKey is a struct used as the key to store -// ClientHandshakeInfo in a context. 
-type clientHandshakeInfoKey struct{} - // ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored // in ctx. // // This API is experimental. func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { - chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo) + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) return chi } @@ -249,15 +261,6 @@ func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { return nil } -func init() { - internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { - return context.WithValue(ctx, requestInfoKey{}, ri) - } - internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context { - return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) - } -} - // ChannelzSecurityInfo defines the interface that security protocols should implement // in order to provide security info to channelz. // diff --git a/credentials/google/google.go b/credentials/google/google.go index 7f3e240e475b..fbdf7dc2997a 100644 --- a/credentials/google/google.go +++ b/credentials/google/google.go @@ -35,57 +35,63 @@ const tokenRequestTimeout = 30 * time.Second var logger = grpclog.Component("credentials") -// NewDefaultCredentials returns a credentials bundle that is configured to work -// with google services. +// DefaultCredentialsOptions constructs options to build DefaultCredentials. +type DefaultCredentialsOptions struct { + // PerRPCCreds is a per RPC credentials that is passed to a bundle. + PerRPCCreds credentials.PerRPCCredentials +} + +// NewDefaultCredentialsWithOptions returns a credentials bundle that is +// configured to work with google services. // // This API is experimental. 
-func NewDefaultCredentials() credentials.Bundle { - c := &creds{ - newPerRPCCreds: func() credentials.PerRPCCredentials { - ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) - defer cancel() - perRPCCreds, err := oauth.NewApplicationDefault(ctx) - if err != nil { - logger.Warningf("google default creds: failed to create application oauth: %v", err) - } - return perRPCCreds - }, +func NewDefaultCredentialsWithOptions(opts DefaultCredentialsOptions) credentials.Bundle { + if opts.PerRPCCreds == nil { + ctx, cancel := context.WithTimeout(context.Background(), tokenRequestTimeout) + defer cancel() + var err error + opts.PerRPCCreds, err = newADC(ctx) + if err != nil { + logger.Warningf("NewDefaultCredentialsWithOptions: failed to create application oauth: %v", err) + } } + c := &creds{opts: opts} bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) if err != nil { - logger.Warningf("google default creds: failed to create new creds: %v", err) + logger.Warningf("NewDefaultCredentialsWithOptions: failed to create new creds: %v", err) } return bundle } +// NewDefaultCredentials returns a credentials bundle that is configured to work +// with google services. +// +// This API is experimental. +func NewDefaultCredentials() credentials.Bundle { + return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{}) +} + // NewComputeEngineCredentials returns a credentials bundle that is configured to work // with google services. This API must only be used when running on GCE. Authentication configured // by this API represents the GCE VM's default service account. // // This API is experimental. 
func NewComputeEngineCredentials() credentials.Bundle { - c := &creds{ - newPerRPCCreds: func() credentials.PerRPCCredentials { - return oauth.NewComputeEngine() - }, - } - bundle, err := c.NewWithMode(internal.CredsBundleModeFallback) - if err != nil { - logger.Warningf("compute engine creds: failed to create new creds: %v", err) - } - return bundle + return NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{ + PerRPCCreds: oauth.NewComputeEngine(), + }) } // creds implements credentials.Bundle. type creds struct { + opts DefaultCredentialsOptions + // Supported modes are defined in internal/internal.go. mode string - // The transport credentials associated with this bundle. + // The active transport credentials associated with this bundle. transportCreds credentials.TransportCredentials - // The per RPC credentials associated with this bundle. + // The active per RPC credentials associated with this bundle. perRPCCreds credentials.PerRPCCredentials - // Creates new per RPC credentials - newPerRPCCreds func() credentials.PerRPCCredentials } func (c *creds) TransportCredentials() credentials.TransportCredentials { @@ -99,28 +105,40 @@ func (c *creds) PerRPCCredentials() credentials.PerRPCCredentials { return c.perRPCCreds } +var ( + newTLS = func() credentials.TransportCredentials { + return credentials.NewTLS(nil) + } + newALTS = func() credentials.TransportCredentials { + return alts.NewClientCreds(alts.DefaultClientOptions()) + } + newADC = func(ctx context.Context) (credentials.PerRPCCredentials, error) { + return oauth.NewApplicationDefault(ctx) + } +) + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the // existing Bundle may cause races. func (c *creds) NewWithMode(mode string) (credentials.Bundle, error) { newCreds := &creds{ - mode: mode, - newPerRPCCreds: c.newPerRPCCreds, + opts: c.opts, + mode: mode, } // Create transport credentials. 
switch mode { case internal.CredsBundleModeFallback: - newCreds.transportCreds = credentials.NewTLS(nil) + newCreds.transportCreds = newClusterTransportCreds(newTLS(), newALTS()) case internal.CredsBundleModeBackendFromBalancer, internal.CredsBundleModeBalancer: // Only the clients can use google default credentials, so we only need // to create new ALTS client creds here. - newCreds.transportCreds = alts.NewClientCreds(alts.DefaultClientOptions()) + newCreds.transportCreds = newALTS() default: return nil, fmt.Errorf("unsupported mode: %v", mode) } if mode == internal.CredsBundleModeFallback || mode == internal.CredsBundleModeBackendFromBalancer { - newCreds.perRPCCreds = newCreds.newPerRPCCreds() + newCreds.perRPCCreds = newCreds.opts.PerRPCCreds } return newCreds, nil diff --git a/credentials/google/google_test.go b/credentials/google/google_test.go new file mode 100644 index 000000000000..1809d545d0ec --- /dev/null +++ b/credentials/google/google_test.go @@ -0,0 +1,163 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package google + +import ( + "context" + "net" + "testing" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/resolver" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +type testCreds struct { + credentials.TransportCredentials + typ string +} + +func (c *testCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return nil, &testAuthInfo{typ: c.typ}, nil +} + +func (c *testCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return nil, &testAuthInfo{typ: c.typ}, nil +} + +type testAuthInfo struct { + typ string +} + +func (t *testAuthInfo) AuthType() string { + return t.typ +} + +var ( + testTLS = &testCreds{typ: "tls"} + testALTS = &testCreds{typ: "alts"} +) + +func overrideNewCredsFuncs() func() { + origNewTLS := newTLS + newTLS = func() credentials.TransportCredentials { + return testTLS + } + origNewALTS := newALTS + newALTS = func() credentials.TransportCredentials { + return testALTS + } + origNewADC := newADC + newADC = func(context.Context) (credentials.PerRPCCredentials, error) { + // We do not use perRPC creds in this test. It is safe to return nil here. + return nil, nil + } + + return func() { + newTLS = origNewTLS + newALTS = origNewALTS + newADC = origNewADC + } +} + +// TestClientHandshakeBasedOnClusterName verifies that by default (without switching +// modes), ClientHandshake does either tls or alts based on the cluster name in +// attributes. 
+func (s) TestClientHandshakeBasedOnClusterName(t *testing.T) { + defer overrideNewCredsFuncs()() + for bundleTyp, tc := range map[string]credentials.Bundle{ + "defaultCredsWithOptions": NewDefaultCredentialsWithOptions(DefaultCredentialsOptions{}), + "defaultCreds": NewDefaultCredentials(), + "computeCreds": NewComputeEngineCredentials(), + } { + tests := []struct { + name string + ctx context.Context + wantTyp string + }{ + { + name: "no cluster name", + ctx: context.Background(), + wantTyp: "tls", + }, + { + name: "with non-CFE cluster name", + ctx: icredentials.NewClientHandshakeInfoContext(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, "lalala").Attributes, + }), + // non-CFE backends should use alts. + wantTyp: "alts", + }, + { + name: "with CFE cluster name", + ctx: icredentials.NewClientHandshakeInfoContext(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, "google_cfe_bigtable.googleapis.com").Attributes, + }), + // CFE should use tls. + wantTyp: "tls", + }, + { + name: "with xdstp CFE cluster name", + ctx: icredentials.NewClientHandshakeInfoContext(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_bigtable.googleapis.com").Attributes, + }), + // CFE should use tls. + wantTyp: "tls", + }, + { + name: "with xdstp non-CFE cluster name", + ctx: icredentials.NewClientHandshakeInfoContext(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, "xdstp://other.com/envoy.config.cluster.v3.Cluster/google_cfe_bigtable.googleapis.com").Attributes, + }), + // non-CFE should use alts. 
+ wantTyp: "alts", + }, + } + for _, tt := range tests { + t.Run(bundleTyp+" "+tt.name, func(t *testing.T) { + _, info, err := tc.TransportCredentials().ClientHandshake(tt.ctx, "", nil) + if err != nil { + t.Fatalf("ClientHandshake failed: %v", err) + } + if gotType := info.AuthType(); gotType != tt.wantTyp { + t.Fatalf("unexpected authtype: %v, want: %v", gotType, tt.wantTyp) + } + + _, infoServer, err := tc.TransportCredentials().ServerHandshake(nil) + if err != nil { + t.Fatalf("ServerHandshake failed: %v", err) + } + // ServerHandshake should always do TLS. + if gotType := infoServer.AuthType(); gotType != "tls" { + t.Fatalf("unexpected server authtype: %v, want: %v", gotType, "tls") + } + }) + } + } +} diff --git a/credentials/google/xds.go b/credentials/google/xds.go new file mode 100644 index 000000000000..2c5c8b9eee13 --- /dev/null +++ b/credentials/google/xds.go @@ -0,0 +1,128 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package google + +import ( + "context" + "net" + "net/url" + "strings" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" +) + +const cfeClusterNamePrefix = "google_cfe_" +const cfeClusterResourceNamePrefix = "/envoy.config.cluster.v3.Cluster/google_cfe_" +const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" + +// clusterTransportCreds is a combo of TLS + ALTS. 
+// +// On the client, ClientHandshake picks TLS or ALTS based on address attributes. +// - if attributes has cluster name +// - if cluster name has prefix "google_cfe_", or +// "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", +// use TLS +// - otherwise, use ALTS +// +// - else, do TLS +// +// On the server, ServerHandshake always does TLS. +type clusterTransportCreds struct { + tls credentials.TransportCredentials + alts credentials.TransportCredentials +} + +func newClusterTransportCreds(tls, alts credentials.TransportCredentials) *clusterTransportCreds { + return &clusterTransportCreds{ + tls: tls, + alts: alts, + } +} + +// clusterName returns the xDS cluster name stored in the attributes in the +// context. +func clusterName(ctx context.Context) string { + chi := credentials.ClientHandshakeInfoFromContext(ctx) + if chi.Attributes == nil { + return "" + } + cluster, _ := internal.GetXDSHandshakeClusterName(chi.Attributes) + return cluster +} + +// isDirectPathCluster returns true if the cluster in the context is a +// directpath cluster, meaning ALTS should be used. +func isDirectPathCluster(ctx context.Context) bool { + cluster := clusterName(ctx) + if cluster == "" { + // No cluster; not xDS; use TLS. + return false + } + if strings.HasPrefix(cluster, cfeClusterNamePrefix) { + // xDS cluster prefixed by "google_cfe_"; use TLS. + return false + } + if !strings.HasPrefix(cluster, "xdstp:") { + // Other xDS cluster name; use ALTS. + return true + } + u, err := url.Parse(cluster) + if err != nil { + // Shouldn't happen, but assume ALTS. + return true + } + // If authority AND path match our CFE checks, use TLS; otherwise use ALTS. 
+ return u.Host != cfeClusterAuthorityName || !strings.HasPrefix(u.Path, cfeClusterResourceNamePrefix) +} + +func (c *clusterTransportCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if isDirectPathCluster(ctx) { + // If attributes have cluster name, and cluster name is not cfe, it's a + // backend address, use ALTS. + return c.alts.ClientHandshake(ctx, authority, rawConn) + } + return c.tls.ClientHandshake(ctx, authority, rawConn) +} + +func (c *clusterTransportCreds) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return c.tls.ServerHandshake(conn) +} + +func (c *clusterTransportCreds) Info() credentials.ProtocolInfo { + // TODO: this always returns tls.Info now, because we don't have a cluster + // name to check when this method is called. This method doesn't affect + // anything important now. We may want to revisit this if it becomes more + // important later. + return c.tls.Info() +} + +func (c *clusterTransportCreds) Clone() credentials.TransportCredentials { + return &clusterTransportCreds{ + tls: c.tls.Clone(), + alts: c.alts.Clone(), + } +} + +func (c *clusterTransportCreds) OverrideServerName(s string) error { + if err := c.tls.OverrideServerName(s); err != nil { + return err + } + return c.alts.OverrideServerName(s) +} diff --git a/credentials/google/xds_test.go b/credentials/google/xds_test.go new file mode 100644 index 000000000000..8aeba396a518 --- /dev/null +++ b/credentials/google/xds_test.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package google + +import ( + "context" + "testing" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/resolver" +) + +func (s) TestIsDirectPathCluster(t *testing.T) { + c := func(cluster string) context.Context { + return icredentials.NewClientHandshakeInfoContext(context.Background(), credentials.ClientHandshakeInfo{ + Attributes: internal.SetXDSHandshakeClusterName(resolver.Address{}, cluster).Attributes, + }) + } + + testCases := []struct { + name string + ctx context.Context + want bool + }{ + {"not an xDS cluster", context.Background(), false}, + {"cfe", c("google_cfe_bigtable.googleapis.com"), false}, + {"non-cfe", c("google_bigtable.googleapis.com"), true}, + {"starts with xdstp but not cfe format", c("xdstp:google_cfe_bigtable.googleapis.com"), true}, + {"no authority", c("xdstp:///envoy.config.cluster.v3.Cluster/google_cfe_"), true}, + {"wrong authority", c("xdstp://foo.bar/envoy.config.cluster.v3.Cluster/google_cfe_"), true}, + {"xdstp CFE", c("xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_"), false}, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if got := isDirectPathCluster(tc.ctx); got != tc.want { + t.Errorf("isDirectPathCluster(_) = %v; want %v", got, tc.want) + } + }) + } +} diff --git a/credentials/insecure/insecure.go b/credentials/insecure/insecure.go index c4fa27c920da..82bee1443bfe 100644 --- a/credentials/insecure/insecure.go +++ 
b/credentials/insecure/insecure.go @@ -18,11 +18,6 @@ // Package insecure provides an implementation of the // credentials.TransportCredentials interface which disables transport security. -// -// Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. package insecure import ( @@ -33,6 +28,9 @@ import ( ) // NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. func NewCredentials() credentials.TransportCredentials { return insecureTC{} } @@ -72,3 +70,29 @@ type info struct { func (info) AuthType() string { return "insecure" } + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns a nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. +func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/credentials/local/local.go b/credentials/local/local.go index f772bc1307b2..d5a3a8596056 100644 --- a/credentials/local/local.go +++ b/credentials/local/local.go @@ -23,7 +23,7 @@ // reported. If local credentials is not used in local connections // (local TCP or UDS), it will fail. 
// -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/credentials/local/local_test.go b/credentials/local/local_test.go index 00ae39f07e56..47f8dbb4ec85 100644 --- a/credentials/local/local_test.go +++ b/credentials/local/local_test.go @@ -131,11 +131,13 @@ func serverHandle(hs serverHandshake, done chan testServerHandleResult, lis net. serverRawConn, err := lis.Accept() if err != nil { done <- testServerHandleResult{authInfo: nil, err: fmt.Errorf("Server failed to accept connection. Error: %v", err)} + return } serverAuthInfo, err := hs(serverRawConn) if err != nil { serverRawConn.Close() done <- testServerHandleResult{authInfo: nil, err: fmt.Errorf("Server failed while handshake. Error: %v", err)} + return } done <- testServerHandleResult{authInfo: serverAuthInfo, err: nil} } diff --git a/credentials/oauth/oauth.go b/credentials/oauth/oauth.go index 852ae375cfc7..d475cbc0894c 100644 --- a/credentials/oauth/oauth.go +++ b/credentials/oauth/oauth.go @@ -22,7 +22,8 @@ package oauth import ( "context" "fmt" - "io/ioutil" + "net/url" + "os" "sync" "golang.org/x/oauth2" @@ -56,13 +57,23 @@ func (ts TokenSource) RequireTransportSecurity() bool { return true } +// removeServiceNameFromJWTURI removes RPC service name from URI. +func removeServiceNameFromJWTURI(uri string) (string, error) { + parsed, err := url.Parse(uri) + if err != nil { + return "", err + } + parsed.Path = "/" + return parsed.String(), nil +} + type jwtAccess struct { jsonKey []byte } // NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. 
func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) + jsonKey, err := os.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) } @@ -75,9 +86,15 @@ func NewJWTAccessFromKey(jsonKey []byte) (credentials.PerRPCCredentials, error) } func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + // Remove RPC service name from URI that will be used as audience + // in a self-signed JWT token. It follows https://google.aip.dev/auth/4111. + aud, err := removeServiceNameFromJWTURI(uri[0]) + if err != nil { + return nil, err + } // TODO: the returned TokenSource is reusable. Store it in a sync.Map, with // uri as the key, to avoid recreating for every RPC. - ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0]) + ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, aud) if err != nil { return nil, err } @@ -104,6 +121,8 @@ type oauthAccess struct { } // NewOauthAccess constructs the PerRPCCredentials using a given token. +// +// Deprecated: use oauth.TokenSource instead. func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { return oauthAccess{token: *token} } @@ -173,7 +192,7 @@ func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerR // NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file // of a Google Developers service account. 
func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) + jsonKey, err := os.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) } diff --git a/credentials/oauth/oauth_test.go b/credentials/oauth/oauth_test.go new file mode 100644 index 000000000000..7e62ecb36c12 --- /dev/null +++ b/credentials/oauth/oauth_test.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package oauth + +import ( + "strings" + "testing" +) + +func checkErrorMsg(err error, msg string) bool { + if err == nil && msg == "" { + return true + } else if err != nil { + return strings.Contains(err.Error(), msg) + } + return false +} + +func TestRemoveServiceNameFromJWTURI(t *testing.T) { + tests := []struct { + name string + uri string + wantedURI string + wantedErrMsg string + }{ + { + name: "invalid URI", + uri: "ht tp://foo.com", + wantedErrMsg: "first path segment in URL cannot contain colon", + }, + { + name: "valid URI", + uri: "https://foo.com/go/", + wantedURI: "https://foo.com/", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got, err := removeServiceNameFromJWTURI(tt.uri); got != tt.wantedURI || !checkErrorMsg(err, tt.wantedErrMsg) { + t.Errorf("RemoveServiceNameFromJWTURI() = %s, %v, want %s, %v", got, err, tt.wantedURI, tt.wantedErrMsg) + } + }) + } +} diff --git a/credentials/sts/sts.go b/credentials/sts/sts.go index 9285192a8eba..0110201a98f3 100644 --- a/credentials/sts/sts.go +++ b/credentials/sts/sts.go @@ -1,5 +1,3 @@ -// +build go1.13 - /* * * Copyright 2020 gRPC authors. @@ -21,7 +19,7 @@ // Package sts implements call credentials using STS (Security Token Service) as // defined in https://tools.ietf.org/html/rfc8693. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be changed or // removed in a later release. 
@@ -35,9 +33,10 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/url" + "os" "sync" "time" @@ -60,8 +59,8 @@ const ( var ( loadSystemCertPool = x509.SystemCertPool makeHTTPDoer = makeHTTPClient - readSubjectTokenFrom = ioutil.ReadFile - readActorTokenFrom = ioutil.ReadFile + readSubjectTokenFrom = os.ReadFile + readActorTokenFrom = os.ReadFile logger = grpclog.Component("credentials") ) @@ -107,7 +106,7 @@ type Options struct { // ActorTokenType is an identifier, as described in // https://tools.ietf.org/html/rfc8693#section-3, that indicates the type of - // the the security token in the "actor_token_path" parameter. + // the security token in the "actor_token_path" parameter. ActorTokenType string // Optional. } @@ -247,12 +246,12 @@ func (c *callCreds) cachedMetadata() map[string]string { // constructRequest creates the STS request body in JSON based on the provided // options. -// - Contents of the subjectToken are read from the file specified in -// options. If we encounter an error here, we bail out. -// - Contents of the actorToken are read from the file specified in options. -// If we encounter an error here, we ignore this field because this is -// optional. -// - Most of the other fields in the request come directly from options. +// - Contents of the subjectToken are read from the file specified in +// options. If we encounter an error here, we bail out. +// - Contents of the actorToken are read from the file specified in options. +// If we encounter an error here, we ignore this field because this is +// optional. +// - Most of the other fields in the request come directly from options. 
// // A new HTTP request is created by calling http.NewRequestWithContext() and // passing the provided context, thereby enforcing any timeouts specified in @@ -308,7 +307,7 @@ func sendRequest(client httpDoer, req *http.Request) ([]byte, error) { // When the http.Client returns a non-nil error, it is the // responsibility of the caller to read the response body till an EOF is // encountered and to close it. - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) resp.Body.Close() if err != nil { return nil, err diff --git a/credentials/sts/sts_test.go b/credentials/sts/sts_test.go index ac680e001112..70bfa8b046f3 100644 --- a/credentials/sts/sts_test.go +++ b/credentials/sts/sts_test.go @@ -1,5 +1,3 @@ -// +build go1.13 - /* * * Copyright 2020 gRPC authors. @@ -27,7 +25,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httputil" "strings" @@ -37,7 +35,7 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" ) @@ -104,7 +102,7 @@ func createTestContext(ctx context.Context, s credentials.SecurityLevel) context Method: "testInfo", AuthInfo: auth, } - return internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + return icredentials.NewRequestInfoContext(ctx, ri) } // errReader implements the io.Reader interface and returns an error from the @@ -116,7 +114,7 @@ func (r errReader) Read(b []byte) (n int, err error) { } // We need a function to construct the response instead of simply declaring it -// as a variable since the the response body will be consumed by the +// as a variable since the response body will be consumed by the // credentials, and therefore we will need a new one everytime. 
func makeGoodResponse() *http.Response { respJSON, _ := json.Marshal(responseParameters{ @@ -125,7 +123,7 @@ func makeGoodResponse() *http.Response { TokenType: "Bearer", ExpiresIn: 3600, }) - respBody := ioutil.NopCloser(bytes.NewReader(respJSON)) + respBody := io.NopCloser(bytes.NewReader(respJSON)) return &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, @@ -332,7 +330,7 @@ func (s) TestGetRequestMetadataCacheExpiry(t *testing.T) { TokenType: "Bearer", ExpiresIn: expiresInSecs, }) - respBody := ioutil.NopCloser(bytes.NewReader(respJSON)) + respBody := io.NopCloser(bytes.NewReader(respJSON)) resp := &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, @@ -368,7 +366,7 @@ func (s) TestGetRequestMetadataBadResponses(t *testing.T) { response: &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, - Body: ioutil.NopCloser(strings.NewReader("not JSON")), + Body: io.NopCloser(strings.NewReader("not JSON")), }, }, { @@ -376,7 +374,7 @@ func (s) TestGetRequestMetadataBadResponses(t *testing.T) { response: &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, - Body: ioutil.NopCloser(strings.NewReader("{}")), + Body: io.NopCloser(strings.NewReader("{}")), }, }, } @@ -671,7 +669,7 @@ func (s) TestSendRequest(t *testing.T) { resp: &http.Response{ Status: "200 OK", StatusCode: http.StatusOK, - Body: ioutil.NopCloser(errReader{}), + Body: io.NopCloser(errReader{}), }, wantErr: true, }, @@ -680,7 +678,7 @@ func (s) TestSendRequest(t *testing.T) { resp: &http.Response{ Status: "400 BadRequest", StatusCode: http.StatusBadRequest, - Body: ioutil.NopCloser(strings.NewReader("")), + Body: io.NopCloser(strings.NewReader("")), }, wantErr: true, }, diff --git a/credentials/tls.go b/credentials/tls.go index 8ee7124f2265..877b7cd21af7 100644 --- a/credentials/tls.go +++ b/credentials/tls.go @@ -23,9 +23,9 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "net/url" + "os" credinternal "google.golang.org/grpc/internal/credentials" ) @@ -166,7 
+166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // it will override the virtual host name of authority (e.g. :authority header // field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) + b, err := os.ReadFile(certFile) if err != nil { return nil, err } @@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -230,4 +230,7 @@ var cipherSuiteLookup = map[uint16]string{ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", } diff --git a/credentials/tls/certprovider/distributor.go b/credentials/tls/certprovider/distributor.go index fdb38a663fe8..11fae92adace 100644 --- a/credentials/tls/certprovider/distributor.go +++ b/credentials/tls/certprovider/distributor.go @@ -31,11 +31,11 @@ import ( // // Provider implementations which choose to use a Distributor should do the // following: -// - create a new Distributor using the NewDistributor() function. -// - invoke the Set() method whenever they have new key material or errors to -// report. -// - delegate to the distributor when handing calls to KeyMaterial(). -// - invoke the Stop() method when they are done using the distributor. 
+// - create a new Distributor using the NewDistributor() function. +// - invoke the Set() method whenever they have new key material or errors to +// report. +// - delegate to the distributor when handing calls to KeyMaterial(). +// - invoke the Stop() method when they are done using the distributor. type Distributor struct { // mu protects the underlying key material. mu sync.Mutex diff --git a/credentials/tls/certprovider/distributor_test.go b/credentials/tls/certprovider/distributor_test.go index bec00e919bcf..48d51375616f 100644 --- a/credentials/tls/certprovider/distributor_test.go +++ b/credentials/tls/certprovider/distributor_test.go @@ -1,5 +1,3 @@ -// +build go1.13 - /* * * Copyright 2020 gRPC authors. diff --git a/credentials/tls/certprovider/meshca/builder.go b/credentials/tls/certprovider/meshca/builder.go deleted file mode 100644 index 4b8af7c9b3c5..000000000000 --- a/credentials/tls/certprovider/meshca/builder.go +++ /dev/null @@ -1,165 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package meshca - -import ( - "crypto/x509" - "encoding/json" - "fmt" - "sync" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/sts" - "google.golang.org/grpc/credentials/tls/certprovider" - "google.golang.org/grpc/internal/backoff" -) - -const pluginName = "mesh_ca" - -// For overriding in unit tests. 
-var ( - grpcDialFunc = grpc.Dial - backoffFunc = backoff.DefaultExponential.Backoff -) - -func init() { - certprovider.Register(newPluginBuilder()) -} - -func newPluginBuilder() *pluginBuilder { - return &pluginBuilder{clients: make(map[ccMapKey]*refCountedCC)} -} - -// Key for the map containing ClientConns to the MeshCA server. Only the server -// name and the STS options (which is used to create call creds) from the plugin -// configuration determine if two configs can share the same ClientConn. Hence -// only those form the key to this map. -type ccMapKey struct { - name string - stsOpts sts.Options -} - -// refCountedCC wraps a grpc.ClientConn to MeshCA along with a reference count. -type refCountedCC struct { - cc *grpc.ClientConn - refCnt int -} - -// pluginBuilder is an implementation of the certprovider.Builder interface, -// which builds certificate provider instances to get certificates signed from -// the MeshCA. -type pluginBuilder struct { - // A collection of ClientConns to the MeshCA server along with a reference - // count. Provider instances whose config point to the same server name will - // end up sharing the ClientConn. - mu sync.Mutex - clients map[ccMapKey]*refCountedCC -} - -// ParseConfig parses the configuration to be passed to the MeshCA plugin -// implementation. Expects the config to be a json.RawMessage which contains a -// serialized JSON representation of the meshca_experimental.GoogleMeshCaConfig -// proto message. -// -// Takes care of sharing the ClientConn to the MeshCA server among -// different plugin instantiations. 
-func (b *pluginBuilder) ParseConfig(c interface{}) (*certprovider.BuildableConfig, error) { - data, ok := c.(json.RawMessage) - if !ok { - return nil, fmt.Errorf("meshca: unsupported config type: %T", c) - } - cfg, err := pluginConfigFromJSON(data) - if err != nil { - return nil, err - } - return certprovider.NewBuildableConfig(pluginName, cfg.canonical(), func(opts certprovider.BuildOptions) certprovider.Provider { - return b.buildFromConfig(cfg, opts) - }), nil -} - -// buildFromConfig builds a certificate provider instance for the given config -// and options. Provider instances are shared wherever possible. -func (b *pluginBuilder) buildFromConfig(cfg *pluginConfig, opts certprovider.BuildOptions) certprovider.Provider { - b.mu.Lock() - defer b.mu.Unlock() - - ccmk := ccMapKey{ - name: cfg.serverURI, - stsOpts: cfg.stsOpts, - } - rcc, ok := b.clients[ccmk] - if !ok { - // STS call credentials take care of exchanging a locally provisioned - // JWT token for an access token which will be accepted by the MeshCA. - callCreds, err := sts.NewCredentials(cfg.stsOpts) - if err != nil { - logger.Errorf("sts.NewCredentials() failed: %v", err) - return nil - } - - // MeshCA is a public endpoint whose certificate is Web-PKI compliant. - // So, we just need to use the system roots to authenticate the MeshCA. 
- cp, err := x509.SystemCertPool() - if err != nil { - logger.Errorf("x509.SystemCertPool() failed: %v", err) - return nil - } - transportCreds := credentials.NewClientTLSFromCert(cp, "") - - cc, err := grpcDialFunc(cfg.serverURI, grpc.WithTransportCredentials(transportCreds), grpc.WithPerRPCCredentials(callCreds)) - if err != nil { - logger.Errorf("grpc.Dial(%s) failed: %v", cfg.serverURI, err) - return nil - } - - rcc = &refCountedCC{cc: cc} - b.clients[ccmk] = rcc - } - rcc.refCnt++ - - p := newProviderPlugin(providerParams{ - cc: rcc.cc, - cfg: cfg, - opts: opts, - backoff: backoffFunc, - doneFunc: func() { - // The plugin implementation will invoke this function when it is - // being closed, and here we take care of closing the ClientConn - // when there are no more plugins using it. We need to acquire the - // lock before accessing the rcc from the enclosing function. - b.mu.Lock() - defer b.mu.Unlock() - rcc.refCnt-- - if rcc.refCnt == 0 { - logger.Infof("Closing grpc.ClientConn to %s", ccmk.name) - rcc.cc.Close() - delete(b.clients, ccmk) - } - }, - }) - return p -} - -// Name returns the MeshCA plugin name. -func (b *pluginBuilder) Name() string { - return pluginName -} diff --git a/credentials/tls/certprovider/meshca/builder_test.go b/credentials/tls/certprovider/meshca/builder_test.go deleted file mode 100644 index 79035d008d9e..000000000000 --- a/credentials/tls/certprovider/meshca/builder_test.go +++ /dev/null @@ -1,177 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package meshca - -import ( - "context" - "encoding/json" - "fmt" - "testing" - - "google.golang.org/grpc" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/tls/certprovider" - "google.golang.org/grpc/internal/testutils" -) - -func overrideHTTPFuncs() func() { - // Directly override the functions which are used to read the zone and - // audience instead of overriding the http.Client. - origReadZone := readZoneFunc - readZoneFunc = func(httpDoer) string { return "test-zone" } - origReadAudience := readAudienceFunc - readAudienceFunc = func(httpDoer) string { return "test-audience" } - return func() { - readZoneFunc = origReadZone - readAudienceFunc = origReadAudience - } -} - -func (s) TestBuildSameConfig(t *testing.T) { - defer overrideHTTPFuncs()() - - // We will attempt to create `cnt` number of providers. So we create a - // channel of the same size here, even though we expect only one ClientConn - // to be pushed into this channel. This makes sure that even if more than - // one ClientConn ends up being created, the Build() call does not block. - const cnt = 5 - ccChan := testutils.NewChannelWithSize(cnt) - - // Override the dial func to dial a dummy MeshCA endpoint, and also push the - // returned ClientConn on a channel to be inspected by the test. - origDialFunc := grpcDialFunc - grpcDialFunc = func(string, ...grpc.DialOption) (*grpc.ClientConn, error) { - cc, err := grpc.Dial("dummy-meshca-endpoint", grpc.WithInsecure()) - ccChan.Send(cc) - return cc, err - } - defer func() { grpcDialFunc = origDialFunc }() - - // Parse a good config to generate a stable config which will be passed to - // invocations of Build(). 
- builder := newPluginBuilder() - buildableConfig, err := builder.ParseConfig(goodConfigFullySpecified) - if err != nil { - t.Fatalf("builder.ParseConfig(%q) failed: %v", goodConfigFullySpecified, err) - } - - // Create multiple providers with the same config. All these providers must - // end up sharing the same ClientConn. - providers := []certprovider.Provider{} - for i := 0; i < cnt; i++ { - p, err := buildableConfig.Build(certprovider.BuildOptions{}) - if err != nil { - t.Fatalf("Build(%+v) failed: %v", buildableConfig, err) - } - providers = append(providers, p) - } - - // Make sure only one ClientConn is created. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - val, err := ccChan.Receive(ctx) - if err != nil { - t.Fatalf("Failed to create ClientConn: %v", err) - } - testCC := val.(*grpc.ClientConn) - - // Attempt to read the second ClientConn should timeout. - ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer cancel() - if _, err := ccChan.Receive(ctx); err != context.DeadlineExceeded { - t.Fatal("Builder created more than one ClientConn") - } - - for _, p := range providers { - p.Close() - } - - for { - state := testCC.GetState() - if state == connectivity.Shutdown { - break - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if !testCC.WaitForStateChange(ctx, state) { - t.Fatalf("timeout waiting for clientConn state to change from %s", state) - } - } -} - -func (s) TestBuildDifferentConfig(t *testing.T) { - defer overrideHTTPFuncs()() - - // We will attempt to create two providers with different configs. So we - // expect two ClientConns to be pushed on to this channel. - const cnt = 2 - ccChan := testutils.NewChannelWithSize(cnt) - - // Override the dial func to dial a dummy MeshCA endpoint, and also push the - // returned ClientConn on a channel to be inspected by the test. 
- origDialFunc := grpcDialFunc - grpcDialFunc = func(string, ...grpc.DialOption) (*grpc.ClientConn, error) { - cc, err := grpc.Dial("dummy-meshca-endpoint", grpc.WithInsecure()) - ccChan.Send(cc) - return cc, err - } - defer func() { grpcDialFunc = origDialFunc }() - - builder := newPluginBuilder() - providers := []certprovider.Provider{} - for i := 0; i < cnt; i++ { - // Copy the good test config and modify the serverURI to make sure that - // a new provider is created for the config. - inputConfig := json.RawMessage(fmt.Sprintf(goodConfigFormatStr, fmt.Sprintf("test-mesh-ca:%d", i))) - buildableConfig, err := builder.ParseConfig(inputConfig) - if err != nil { - t.Fatalf("builder.ParseConfig(%q) failed: %v", inputConfig, err) - } - - p, err := buildableConfig.Build(certprovider.BuildOptions{}) - if err != nil { - t.Fatalf("Build(%+v) failed: %v", buildableConfig, err) - } - providers = append(providers, p) - } - - // Make sure two ClientConns are created. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - for i := 0; i < cnt; i++ { - if _, err := ccChan.Receive(ctx); err != nil { - t.Fatalf("Failed to create ClientConn: %v", err) - } - } - - // Close the first provider, and attempt to read key material from the - // second provider. The call to read key material should timeout, but it - // should not return certprovider.errProviderClosed. - providers[0].Close() - ctx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer cancel() - if _, err := providers[1].KeyMaterial(ctx); err != context.DeadlineExceeded { - t.Fatalf("provider.KeyMaterial(ctx) = %v, want contextDeadlineExceeded", err) - } - - // Close the second provider to make sure that the leakchecker is happy. 
- providers[1].Close() -} diff --git a/credentials/tls/certprovider/meshca/config.go b/credentials/tls/certprovider/meshca/config.go deleted file mode 100644 index c0772b3bb7ea..000000000000 --- a/credentials/tls/certprovider/meshca/config.go +++ /dev/null @@ -1,310 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package meshca - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/http/httputil" - "path" - "strings" - "time" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - "github.com/golang/protobuf/ptypes" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/types/known/durationpb" - - "google.golang.org/grpc/credentials/sts" -) - -const ( - // GKE metadata server endpoint. - mdsBaseURI = "http://metadata.google.internal/" - mdsRequestTimeout = 5 * time.Second - - // The following are default values used in the interaction with MeshCA. - defaultMeshCaEndpoint = "meshca.googleapis.com" - defaultCallTimeout = 10 * time.Second - defaultCertLifetimeSecs = 86400 // 24h in seconds - defaultCertGraceTimeSecs = 43200 // 12h in seconds - defaultKeyTypeRSA = "RSA" - defaultKeySize = 2048 - - // The following are default values used in the interaction with STS or - // Secure Token Service, which is used to exchange the JWT token for an - // access token. 
- defaultSTSEndpoint = "securetoken.googleapis.com" - defaultCloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" - defaultRequestedTokenType = "urn:ietf:params:oauth:token-type:access_token" - defaultSubjectTokenType = "urn:ietf:params:oauth:token-type:jwt" -) - -// For overriding in unit tests. -var ( - makeHTTPDoer = makeHTTPClient - readZoneFunc = readZone - readAudienceFunc = readAudience -) - -type pluginConfig struct { - serverURI string - stsOpts sts.Options - callTimeout time.Duration - certLifetime time.Duration - certGraceTime time.Duration - keyType string - keySize int - location string -} - -// Type of key to be embedded in CSRs sent to the MeshCA. -const ( - keyTypeUnknown = 0 - keyTypeRSA = 1 -) - -// pluginConfigFromJSON parses the provided config in JSON. -// -// For certain values missing in the config, we use default values defined at -// the top of this file. -// -// If the location field or STS audience field is missing, we try talking to the -// GKE Metadata server and try to infer these values. If this attempt does not -// succeed, we let those fields have empty values. -func pluginConfigFromJSON(data json.RawMessage) (*pluginConfig, error) { - // This anonymous struct corresponds to the expected JSON config. 
- cfgJSON := &struct { - Server json.RawMessage `json:"server,omitempty"` // Expect a v3corepb.ApiConfigSource - CertificateLifetime json.RawMessage `json:"certificate_lifetime,omitempty"` // Expect a durationpb.Duration - RenewalGracePeriod json.RawMessage `json:"renewal_grace_period,omitempty"` // Expect a durationpb.Duration - KeyType int `json:"key_type,omitempty"` - KeySize int `json:"key_size,omitempty"` - Location string `json:"location,omitempty"` - }{} - if err := json.Unmarshal(data, cfgJSON); err != nil { - return nil, fmt.Errorf("meshca: failed to unmarshal config: %v", err) - } - - // Further unmarshal fields represented as json.RawMessage in the above - // anonymous struct, and use default values if not specified. - server := &v3corepb.ApiConfigSource{} - if cfgJSON.Server != nil { - if err := protojson.Unmarshal(cfgJSON.Server, server); err != nil { - return nil, fmt.Errorf("meshca: protojson.Unmarshal(%+v) failed: %v", cfgJSON.Server, err) - } - } - certLifetime := &durationpb.Duration{Seconds: defaultCertLifetimeSecs} - if cfgJSON.CertificateLifetime != nil { - if err := protojson.Unmarshal(cfgJSON.CertificateLifetime, certLifetime); err != nil { - return nil, fmt.Errorf("meshca: protojson.Unmarshal(%+v) failed: %v", cfgJSON.CertificateLifetime, err) - } - } - certGraceTime := &durationpb.Duration{Seconds: defaultCertGraceTimeSecs} - if cfgJSON.RenewalGracePeriod != nil { - if err := protojson.Unmarshal(cfgJSON.RenewalGracePeriod, certGraceTime); err != nil { - return nil, fmt.Errorf("meshca: protojson.Unmarshal(%+v) failed: %v", cfgJSON.RenewalGracePeriod, err) - } - } - - if api := server.GetApiType(); api != v3corepb.ApiConfigSource_GRPC { - return nil, fmt.Errorf("meshca: server has apiType %s, want %s", api, v3corepb.ApiConfigSource_GRPC) - } - - pc := &pluginConfig{ - certLifetime: certLifetime.AsDuration(), - certGraceTime: certGraceTime.AsDuration(), - } - gs := server.GetGrpcServices() - if l := len(gs); l != 1 { - return nil, 
fmt.Errorf("meshca: number of gRPC services in config is %d, expected 1", l) - } - grpcService := gs[0] - googGRPC := grpcService.GetGoogleGrpc() - if googGRPC == nil { - return nil, errors.New("meshca: missing google gRPC service in config") - } - pc.serverURI = googGRPC.GetTargetUri() - if pc.serverURI == "" { - pc.serverURI = defaultMeshCaEndpoint - } - - callCreds := googGRPC.GetCallCredentials() - if len(callCreds) == 0 { - return nil, errors.New("meshca: missing call credentials in config") - } - var stsCallCreds *v3corepb.GrpcService_GoogleGrpc_CallCredentials_StsService - for _, cc := range callCreds { - if stsCallCreds = cc.GetStsService(); stsCallCreds != nil { - break - } - } - if stsCallCreds == nil { - return nil, errors.New("meshca: missing STS call credentials in config") - } - if stsCallCreds.GetSubjectTokenPath() == "" { - return nil, errors.New("meshca: missing subjectTokenPath in STS call credentials config") - } - pc.stsOpts = makeStsOptsWithDefaults(stsCallCreds) - - var err error - if pc.callTimeout, err = ptypes.Duration(grpcService.GetTimeout()); err != nil { - pc.callTimeout = defaultCallTimeout - } - switch cfgJSON.KeyType { - case keyTypeUnknown, keyTypeRSA: - pc.keyType = defaultKeyTypeRSA - default: - return nil, fmt.Errorf("meshca: unsupported key type: %s, only support RSA keys", pc.keyType) - } - pc.keySize = cfgJSON.KeySize - if pc.keySize == 0 { - pc.keySize = defaultKeySize - } - pc.location = cfgJSON.Location - if pc.location == "" { - pc.location = readZoneFunc(makeHTTPDoer()) - } - - return pc, nil -} - -func (pc *pluginConfig) canonical() []byte { - return []byte(fmt.Sprintf("%s:%s:%s:%s:%s:%s:%d:%s", pc.serverURI, pc.stsOpts, pc.callTimeout, pc.certLifetime, pc.certGraceTime, pc.keyType, pc.keySize, pc.location)) -} - -func makeStsOptsWithDefaults(stsCallCreds *v3corepb.GrpcService_GoogleGrpc_CallCredentials_StsService) sts.Options { - opts := sts.Options{ - TokenExchangeServiceURI: stsCallCreds.GetTokenExchangeServiceUri(), 
- Resource: stsCallCreds.GetResource(), - Audience: stsCallCreds.GetAudience(), - Scope: stsCallCreds.GetScope(), - RequestedTokenType: stsCallCreds.GetRequestedTokenType(), - SubjectTokenPath: stsCallCreds.GetSubjectTokenPath(), - SubjectTokenType: stsCallCreds.GetSubjectTokenType(), - ActorTokenPath: stsCallCreds.GetActorTokenPath(), - ActorTokenType: stsCallCreds.GetActorTokenType(), - } - - // Use sane defaults for unspecified fields. - if opts.TokenExchangeServiceURI == "" { - opts.TokenExchangeServiceURI = defaultSTSEndpoint - } - if opts.Audience == "" { - opts.Audience = readAudienceFunc(makeHTTPDoer()) - } - if opts.Scope == "" { - opts.Scope = defaultCloudPlatformScope - } - if opts.RequestedTokenType == "" { - opts.RequestedTokenType = defaultRequestedTokenType - } - if opts.SubjectTokenType == "" { - opts.SubjectTokenType = defaultSubjectTokenType - } - return opts -} - -// httpDoer wraps the single method on the http.Client type that we use. This -// helps with overriding in unit tests. 
-type httpDoer interface { - Do(req *http.Request) (*http.Response, error) -} - -func makeHTTPClient() httpDoer { - return &http.Client{Timeout: mdsRequestTimeout} -} - -func readMetadata(client httpDoer, uriPath string) (string, error) { - req, err := http.NewRequest("GET", mdsBaseURI+uriPath, nil) - if err != nil { - return "", err - } - req.Header.Add("Metadata-Flavor", "Google") - - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode != http.StatusOK { - dump, err := httputil.DumpRequestOut(req, false) - if err != nil { - logger.Warningf("Failed to dump HTTP request: %v", err) - } - logger.Warningf("Request %q returned status %v", dump, resp.StatusCode) - } - return string(body), err -} - -func readZone(client httpDoer) string { - zoneURI := "computeMetadata/v1/instance/zone" - data, err := readMetadata(client, zoneURI) - if err != nil { - logger.Warningf("GET %s failed: %v", path.Join(mdsBaseURI, zoneURI), err) - return "" - } - - // The output returned by the metadata server looks like this: - // projects//zones/ - parts := strings.Split(data, "/") - if len(parts) == 0 { - logger.Warningf("GET %s returned {%s}, does not match expected format {projects//zones/}", path.Join(mdsBaseURI, zoneURI)) - return "" - } - return parts[len(parts)-1] -} - -// readAudience constructs the audience field to be used in the STS request, if -// it is not specified in the plugin configuration. -// -// "identitynamespace:{TRUST_DOMAIN}:{GKE_CLUSTER_URL}" is the format of the -// audience field. When workload identity is enabled on a GCP project, a default -// trust domain is created whose value is "{PROJECT_ID}.svc.id.goog". The format -// of the GKE_CLUSTER_URL is: -// https://container.googleapis.com/v1/projects/{PROJECT_ID}/zones/{ZONE}/clusters/{CLUSTER_NAME}. 
-func readAudience(client httpDoer) string { - projURI := "computeMetadata/v1/project/project-id" - project, err := readMetadata(client, projURI) - if err != nil { - logger.Warningf("GET %s failed: %v", path.Join(mdsBaseURI, projURI), err) - return "" - } - trustDomain := fmt.Sprintf("%s.svc.id.goog", project) - - clusterURI := "computeMetadata/v1/instance/attributes/cluster-name" - cluster, err := readMetadata(client, clusterURI) - if err != nil { - logger.Warningf("GET %s failed: %v", path.Join(mdsBaseURI, clusterURI), err) - return "" - } - zone := readZoneFunc(client) - clusterURL := fmt.Sprintf("https://container.googleapis.com/v1/projects/%s/zones/%s/clusters/%s", project, zone, cluster) - audience := fmt.Sprintf("identitynamespace:%s:%s", trustDomain, clusterURL) - return audience -} diff --git a/credentials/tls/certprovider/meshca/config_test.go b/credentials/tls/certprovider/meshca/config_test.go deleted file mode 100644 index 5deb484f341c..000000000000 --- a/credentials/tls/certprovider/meshca/config_test.go +++ /dev/null @@ -1,375 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package meshca - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" -) - -const ( - testProjectID = "test-project-id" - testGKECluster = "test-gke-cluster" - testGCEZone = "test-zone" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -var ( - goodConfigFormatStr = ` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "googleGrpc": { - "target_uri": %q, - "call_credentials": [ - { - "access_token": "foo" - }, - { - "sts_service": { - "token_exchange_service_uri": "http://test-sts", - "resource": "test-resource", - "audience": "test-audience", - "scope": "test-scope", - "requested_token_type": "test-requested-token-type", - "subject_token_path": "test-subject-token-path", - "subject_token_type": "test-subject-token-type", - "actor_token_path": "test-actor-token-path", - "actor_token_type": "test-actor-token-type" - } - } - ] - }, - "timeout": "10s" - } - ] - }, - "certificate_lifetime": "86400s", - "renewal_grace_period": "43200s", - "key_type": 1, - "key_size": 2048, - "location": "us-west1-b" - }` - goodConfigWithDefaults = json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "googleGrpc": { - "call_credentials": [ - { - "sts_service": { - "subject_token_path": "test-subject-token-path" - } - } - ] - }, - "timeout": "10s" - } - ] - } - }`) -) - -var goodConfigFullySpecified = json.RawMessage(fmt.Sprintf(goodConfigFormatStr, "test-meshca")) - -// verifyReceivedRequest reads the HTTP request received by the fake client -// (exposed through a channel), and verifies that it matches the expected -// request. 
-func verifyReceivedRequest(fc *testutils.FakeHTTPClient, wantURI string) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - val, err := fc.ReqChan.Receive(ctx) - if err != nil { - return err - } - gotReq := val.(*http.Request) - if gotURI := gotReq.URL.String(); gotURI != wantURI { - return fmt.Errorf("request contains URL %q want %q", gotURI, wantURI) - } - if got, want := gotReq.Header.Get("Metadata-Flavor"), "Google"; got != want { - return fmt.Errorf("request contains flavor %q want %q", got, want) - } - return nil -} - -// TestParseConfigSuccessFullySpecified tests the case where the config is fully -// specified and no defaults are required. -func (s) TestParseConfigSuccessFullySpecified(t *testing.T) { - wantConfig := "test-meshca:http://test-sts:test-resource:test-audience:test-scope:test-requested-token-type:test-subject-token-path:test-subject-token-type:test-actor-token-path:test-actor-token-type:10s:24h0m0s:12h0m0s:RSA:2048:us-west1-b" - - cfg, err := pluginConfigFromJSON(goodConfigFullySpecified) - if err != nil { - t.Fatalf("pluginConfigFromJSON(%q) failed: %v", goodConfigFullySpecified, err) - } - gotConfig := cfg.canonical() - if diff := cmp.Diff(wantConfig, string(gotConfig)); diff != "" { - t.Errorf("pluginConfigFromJSON(%q) returned config does not match expected (-want +got):\n%s", string(goodConfigFullySpecified), diff) - } -} - -// TestParseConfigSuccessWithDefaults tests cases where the config is not fully -// specified, and we end up using some sane defaults. -func (s) TestParseConfigSuccessWithDefaults(t *testing.T) { - wantConfig := fmt.Sprintf("%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s", - "meshca.googleapis.com", // Mesh CA Server URI. - "securetoken.googleapis.com", // STS Server URI. - "", // STS Resource Name. 
- "identitynamespace:test-project-id.svc.id.goog:https://container.googleapis.com/v1/projects/test-project-id/zones/test-zone/clusters/test-gke-cluster", // STS Audience. - "https://www.googleapis.com/auth/cloud-platform", // STS Scope. - "urn:ietf:params:oauth:token-type:access_token", // STS requested token type. - "test-subject-token-path", // STS subject token path. - "urn:ietf:params:oauth:token-type:jwt", // STS subject token type. - "", // STS actor token path. - "", // STS actor token type. - "10s", // Call timeout. - "24h0m0s", // Cert life time. - "12h0m0s", // Cert grace time. - "RSA", // Key type - "2048", // Key size - "test-zone", // Zone - ) - - // We expect the config parser to make four HTTP requests and receive four - // responses. Hence we setup the request and response channels in the fake - // client with appropriate buffer size. - fc := &testutils.FakeHTTPClient{ - ReqChan: testutils.NewChannelWithSize(4), - RespChan: testutils.NewChannelWithSize(4), - } - // Set up the responses to be delivered to the config parser by the fake - // client. The config parser expects responses with project_id, - // gke_cluster_id and gce_zone. The zone is read twice, once as part of - // reading the STS audience and once to get location metadata. 
- fc.RespChan.Send(&http.Response{ - Status: "200 OK", - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(testProjectID))), - }) - fc.RespChan.Send(&http.Response{ - Status: "200 OK", - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(testGKECluster))), - }) - fc.RespChan.Send(&http.Response{ - Status: "200 OK", - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(fmt.Sprintf("projects/%s/zones/%s", testProjectID, testGCEZone)))), - }) - fc.RespChan.Send(&http.Response{ - Status: "200 OK", - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader([]byte(fmt.Sprintf("projects/%s/zones/%s", testProjectID, testGCEZone)))), - }) - // Override the http.Client with our fakeClient. - origMakeHTTPDoer := makeHTTPDoer - makeHTTPDoer = func() httpDoer { return fc } - defer func() { makeHTTPDoer = origMakeHTTPDoer }() - - // Spawn a goroutine to verify the HTTP requests sent out as part of the - // config parsing. 
- errCh := make(chan error, 1) - go func() { - if err := verifyReceivedRequest(fc, "http://metadata.google.internal/computeMetadata/v1/project/project-id"); err != nil { - errCh <- err - return - } - if err := verifyReceivedRequest(fc, "http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name"); err != nil { - errCh <- err - return - } - if err := verifyReceivedRequest(fc, "http://metadata.google.internal/computeMetadata/v1/instance/zone"); err != nil { - errCh <- err - return - } - errCh <- nil - }() - - cfg, err := pluginConfigFromJSON(goodConfigWithDefaults) - if err != nil { - t.Fatalf("pluginConfigFromJSON(%q) failed: %v", goodConfigWithDefaults, err) - } - gotConfig := cfg.canonical() - if diff := cmp.Diff(wantConfig, string(gotConfig)); diff != "" { - t.Errorf("builder.ParseConfig(%q) returned config does not match expected (-want +got):\n%s", goodConfigWithDefaults, diff) - } - - if err := <-errCh; err != nil { - t.Fatal(err) - } -} - -// TestParseConfigFailureCases tests several invalid configs which all result in -// config parsing failures. 
-func (s) TestParseConfigFailureCases(t *testing.T) { - tests := []struct { - desc string - inputConfig json.RawMessage - wantErr string - }{ - { - desc: "invalid JSON", - inputConfig: json.RawMessage(`bad bad json`), - wantErr: "failed to unmarshal config", - }, - { - desc: "bad apiType", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 1 - } - }`), - wantErr: "server has apiType REST, want GRPC", - }, - { - desc: "no grpc services", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2 - } - }`), - wantErr: "number of gRPC services in config is 0, expected 1", - }, - { - desc: "too many grpc services", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [{}, {}] - } - }`), - wantErr: "number of gRPC services in config is 2, expected 1", - }, - { - desc: "missing google grpc service", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "envoyGrpc": {} - } - ] - } - }`), - wantErr: "missing google gRPC service in config", - }, - { - desc: "missing call credentials", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "googleGrpc": { - "target_uri": "foo" - } - } - ] - } - }`), - wantErr: "missing call credentials in config", - }, - { - desc: "missing STS call credentials", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "googleGrpc": { - "target_uri": "foo", - "call_credentials": [ - { - "access_token": "foo" - } - ] - } - } - ] - } - }`), - wantErr: "missing STS call credentials in config", - }, - { - desc: "with no defaults", - inputConfig: json.RawMessage(` - { - "server": { - "api_type": 2, - "grpc_services": [ - { - "googleGrpc": { - "target_uri": "foo", - "call_credentials": [ - { - "sts_service": {} - } - ] - } - } - ] - } - }`), - wantErr: "missing subjectTokenPath in STS call credentials config", - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t 
*testing.T) { - cfg, err := pluginConfigFromJSON(test.inputConfig) - if err == nil { - t.Fatalf("pluginConfigFromJSON(%q) = %v, expected to return error (%v)", test.inputConfig, string(cfg.canonical()), test.wantErr) - - } - if !strings.Contains(err.Error(), test.wantErr) { - t.Fatalf("builder.ParseConfig(%q) = (%v), want error (%v)", test.inputConfig, err, test.wantErr) - } - }) - } -} diff --git a/credentials/tls/certprovider/meshca/internal/v1/meshca.pb.go b/credentials/tls/certprovider/meshca/internal/v1/meshca.pb.go deleted file mode 100644 index 387f8c55abc0..000000000000 --- a/credentials/tls/certprovider/meshca/internal/v1/meshca.pb.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2019 Istio Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: istio/google/security/meshca/v1/meshca.proto - -package google_security_meshca_v1 - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Certificate request message. -type MeshCertificateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The request ID must be a valid UUID with the exception that zero UUID is - // not supported (00000000-0000-0000-0000-000000000000). - RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - // PEM-encoded certificate request. - Csr string `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"` - // Optional: requested certificate validity period. - Validity *durationpb.Duration `protobuf:"bytes,3,opt,name=validity,proto3" json:"validity,omitempty"` // Reserved 4 -} - -func (x *MeshCertificateRequest) Reset() { - *x = MeshCertificateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_istio_google_security_meshca_v1_meshca_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeshCertificateRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeshCertificateRequest) ProtoMessage() {} - -func (x *MeshCertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_istio_google_security_meshca_v1_meshca_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeshCertificateRequest.ProtoReflect.Descriptor instead. 
-func (*MeshCertificateRequest) Descriptor() ([]byte, []int) { - return file_istio_google_security_meshca_v1_meshca_proto_rawDescGZIP(), []int{0} -} - -func (x *MeshCertificateRequest) GetRequestId() string { - if x != nil { - return x.RequestId - } - return "" -} - -func (x *MeshCertificateRequest) GetCsr() string { - if x != nil { - return x.Csr - } - return "" -} - -func (x *MeshCertificateRequest) GetValidity() *durationpb.Duration { - if x != nil { - return x.Validity - } - return nil -} - -// Certificate response message. -type MeshCertificateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // PEM-encoded certificate chain. - // Leaf cert is element '0'. Root cert is element 'n'. - CertChain []string `protobuf:"bytes,1,rep,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` -} - -func (x *MeshCertificateResponse) Reset() { - *x = MeshCertificateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_istio_google_security_meshca_v1_meshca_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MeshCertificateResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MeshCertificateResponse) ProtoMessage() {} - -func (x *MeshCertificateResponse) ProtoReflect() protoreflect.Message { - mi := &file_istio_google_security_meshca_v1_meshca_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MeshCertificateResponse.ProtoReflect.Descriptor instead. 
-func (*MeshCertificateResponse) Descriptor() ([]byte, []int) { - return file_istio_google_security_meshca_v1_meshca_proto_rawDescGZIP(), []int{1} -} - -func (x *MeshCertificateResponse) GetCertChain() []string { - if x != nil { - return x.CertChain - } - return nil -} - -var File_istio_google_security_meshca_v1_meshca_proto protoreflect.FileDescriptor - -var file_istio_google_security_meshca_v1_meshca_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2f, 0x6d, 0x65, 0x73, 0x68, 0x63, 0x61, 0x2f, 0x76, - 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x68, 0x63, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, - 0x6d, 0x65, 0x73, 0x68, 0x63, 0x61, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x80, 0x01, 0x0a, 0x16, 0x4d, 0x65, - 0x73, 0x68, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x73, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x63, 0x73, 0x72, 0x12, 0x35, 0x0a, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x22, 0x38, 0x0a, 0x17, - 0x4d, 0x65, 0x73, 0x68, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, - 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x63, 0x65, 0x72, - 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x32, 0x96, 0x01, 0x0a, 0x16, 0x4d, 0x65, 0x73, 0x68, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0x7c, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x63, 0x61, 0x2e, - 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x6d, 0x65, 0x73, 0x68, - 0x63, 0x61, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, - 0x2e, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x63, 0x61, 0x2e, 0x76, 0x31, - 0x42, 0x0b, 0x4d, 0x65, 0x73, 0x68, 0x43, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_istio_google_security_meshca_v1_meshca_proto_rawDescOnce sync.Once - file_istio_google_security_meshca_v1_meshca_proto_rawDescData = file_istio_google_security_meshca_v1_meshca_proto_rawDesc -) - -func file_istio_google_security_meshca_v1_meshca_proto_rawDescGZIP() []byte { - file_istio_google_security_meshca_v1_meshca_proto_rawDescOnce.Do(func() { - file_istio_google_security_meshca_v1_meshca_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_istio_google_security_meshca_v1_meshca_proto_rawDescData) - }) - return file_istio_google_security_meshca_v1_meshca_proto_rawDescData -} - -var file_istio_google_security_meshca_v1_meshca_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_istio_google_security_meshca_v1_meshca_proto_goTypes = []interface{}{ - (*MeshCertificateRequest)(nil), // 0: google.security.meshca.v1.MeshCertificateRequest - (*MeshCertificateResponse)(nil), // 1: google.security.meshca.v1.MeshCertificateResponse - (*durationpb.Duration)(nil), // 2: google.protobuf.Duration -} -var file_istio_google_security_meshca_v1_meshca_proto_depIdxs = []int32{ - 2, // 0: google.security.meshca.v1.MeshCertificateRequest.validity:type_name -> google.protobuf.Duration - 0, // 1: google.security.meshca.v1.MeshCertificateService.CreateCertificate:input_type -> google.security.meshca.v1.MeshCertificateRequest - 1, // 2: google.security.meshca.v1.MeshCertificateService.CreateCertificate:output_type -> google.security.meshca.v1.MeshCertificateResponse - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_istio_google_security_meshca_v1_meshca_proto_init() } -func file_istio_google_security_meshca_v1_meshca_proto_init() { - if File_istio_google_security_meshca_v1_meshca_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_istio_google_security_meshca_v1_meshca_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MeshCertificateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_istio_google_security_meshca_v1_meshca_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*MeshCertificateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_istio_google_security_meshca_v1_meshca_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_istio_google_security_meshca_v1_meshca_proto_goTypes, - DependencyIndexes: file_istio_google_security_meshca_v1_meshca_proto_depIdxs, - MessageInfos: file_istio_google_security_meshca_v1_meshca_proto_msgTypes, - }.Build() - File_istio_google_security_meshca_v1_meshca_proto = out.File - file_istio_google_security_meshca_v1_meshca_proto_rawDesc = nil - file_istio_google_security_meshca_v1_meshca_proto_goTypes = nil - file_istio_google_security_meshca_v1_meshca_proto_depIdxs = nil -} diff --git a/credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go b/credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go deleted file mode 100644 index e53a61598aba..000000000000 --- a/credentials/tls/certprovider/meshca/internal/v1/meshca_grpc.pb.go +++ /dev/null @@ -1,106 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package google_security_meshca_v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// MeshCertificateServiceClient is the client API for MeshCertificateService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type MeshCertificateServiceClient interface { - // Using provided CSR, returns a signed certificate that represents a GCP - // service account identity. - CreateCertificate(ctx context.Context, in *MeshCertificateRequest, opts ...grpc.CallOption) (*MeshCertificateResponse, error) -} - -type meshCertificateServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewMeshCertificateServiceClient(cc grpc.ClientConnInterface) MeshCertificateServiceClient { - return &meshCertificateServiceClient{cc} -} - -func (c *meshCertificateServiceClient) CreateCertificate(ctx context.Context, in *MeshCertificateRequest, opts ...grpc.CallOption) (*MeshCertificateResponse, error) { - out := new(MeshCertificateResponse) - err := c.cc.Invoke(ctx, "/google.security.meshca.v1.MeshCertificateService/CreateCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MeshCertificateServiceServer is the server API for MeshCertificateService service. -// All implementations must embed UnimplementedMeshCertificateServiceServer -// for forward compatibility -type MeshCertificateServiceServer interface { - // Using provided CSR, returns a signed certificate that represents a GCP - // service account identity. - CreateCertificate(context.Context, *MeshCertificateRequest) (*MeshCertificateResponse, error) - mustEmbedUnimplementedMeshCertificateServiceServer() -} - -// UnimplementedMeshCertificateServiceServer must be embedded to have forward compatible implementations. 
-type UnimplementedMeshCertificateServiceServer struct { -} - -func (UnimplementedMeshCertificateServiceServer) CreateCertificate(context.Context, *MeshCertificateRequest) (*MeshCertificateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateCertificate not implemented") -} -func (UnimplementedMeshCertificateServiceServer) mustEmbedUnimplementedMeshCertificateServiceServer() { -} - -// UnsafeMeshCertificateServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to MeshCertificateServiceServer will -// result in compilation errors. -type UnsafeMeshCertificateServiceServer interface { - mustEmbedUnimplementedMeshCertificateServiceServer() -} - -func RegisterMeshCertificateServiceServer(s grpc.ServiceRegistrar, srv MeshCertificateServiceServer) { - s.RegisterService(&MeshCertificateService_ServiceDesc, srv) -} - -func _MeshCertificateService_CreateCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MeshCertificateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MeshCertificateServiceServer).CreateCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/google.security.meshca.v1.MeshCertificateService/CreateCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MeshCertificateServiceServer).CreateCertificate(ctx, req.(*MeshCertificateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// MeshCertificateService_ServiceDesc is the grpc.ServiceDesc for MeshCertificateService service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var MeshCertificateService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "google.security.meshca.v1.MeshCertificateService", - HandlerType: (*MeshCertificateServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateCertificate", - Handler: _MeshCertificateService_CreateCertificate_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "istio/google/security/meshca/v1/meshca.proto", -} diff --git a/credentials/tls/certprovider/meshca/plugin.go b/credentials/tls/certprovider/meshca/plugin.go deleted file mode 100644 index ab1958ac1fd0..000000000000 --- a/credentials/tls/certprovider/meshca/plugin.go +++ /dev/null @@ -1,289 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package meshca provides an implementation of the Provider interface which -// communicates with MeshCA to get certificates signed. 
-package meshca - -import ( - "context" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "encoding/pem" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" - "github.com/google/uuid" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/tls/certprovider" - meshgrpc "google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1" - meshpb "google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/metadata" -) - -// In requests sent to the MeshCA, we add a metadata header with this key and -// the value being the GCE zone in which the workload is running in. -const locationMetadataKey = "x-goog-request-params" - -// For overriding from unit tests. -var newDistributorFunc = func() distributor { return certprovider.NewDistributor() } - -// distributor wraps the methods on certprovider.Distributor which are used by -// the plugin. This is very useful in tests which need to know exactly when the -// plugin updates its key material. -type distributor interface { - KeyMaterial(ctx context.Context) (*certprovider.KeyMaterial, error) - Set(km *certprovider.KeyMaterial, err error) - Stop() -} - -// providerPlugin is an implementation of the certprovider.Provider interface, -// which gets certificates signed by communicating with the MeshCA. -type providerPlugin struct { - distributor // Holds the key material. - cancel context.CancelFunc - cc *grpc.ClientConn // Connection to MeshCA server. - cfg *pluginConfig // Plugin configuration. - opts certprovider.BuildOptions // Key material options. - logger *grpclog.PrefixLogger // Plugin instance specific prefix. - backoff func(int) time.Duration // Exponential backoff. - doneFunc func() // Notify the builder when done. -} - -// providerParams wraps params passed to the provider plugin at creation time. 
-type providerParams struct { - // This ClientConn to the MeshCA server is owned by the builder. - cc *grpc.ClientConn - cfg *pluginConfig - opts certprovider.BuildOptions - backoff func(int) time.Duration - doneFunc func() -} - -func newProviderPlugin(params providerParams) *providerPlugin { - ctx, cancel := context.WithCancel(context.Background()) - p := &providerPlugin{ - cancel: cancel, - cc: params.cc, - cfg: params.cfg, - opts: params.opts, - backoff: params.backoff, - doneFunc: params.doneFunc, - distributor: newDistributorFunc(), - } - p.logger = prefixLogger((p)) - p.logger.Infof("plugin created") - go p.run(ctx) - return p -} - -func (p *providerPlugin) Close() { - p.logger.Infof("plugin closed") - p.Stop() // Stop the embedded distributor. - p.cancel() - p.doneFunc() -} - -// run is a long running goroutine which periodically sends out CSRs to the -// MeshCA, and updates the underlying Distributor with the new key material. -func (p *providerPlugin) run(ctx context.Context) { - // We need to start fetching key material right away. The next attempt will - // be triggered by the timer firing. - for { - certValidity, err := p.updateKeyMaterial(ctx) - if err != nil { - return - } - - // We request a certificate with the configured validity duration (which - // is usually twice as much as the grace period). But the server is free - // to return a certificate with whatever validity time it deems right. - refreshAfter := p.cfg.certGraceTime - if refreshAfter > certValidity { - // The default value of cert grace time is half that of the default - // cert validity time. So here, when we have to use a non-default - // cert life time, we will set the grace time again to half that of - // the validity time. - refreshAfter = certValidity / 2 - } - timer := time.NewTimer(refreshAfter) - select { - case <-ctx.Done(): - return - case <-timer.C: - } - } -} - -// updateKeyMaterial generates a CSR and attempts to get it signed from the -// MeshCA. 
It retries with an exponential backoff till it succeeds or the -// deadline specified in ctx expires. Once it gets the CSR signed from the -// MeshCA, it updates the Distributor with the new key material. -// -// It returns the amount of time the new certificate is valid for. -func (p *providerPlugin) updateKeyMaterial(ctx context.Context) (time.Duration, error) { - client := meshgrpc.NewMeshCertificateServiceClient(p.cc) - retries := 0 - for { - if ctx.Err() != nil { - return 0, ctx.Err() - } - - if retries != 0 { - bi := p.backoff(retries) - p.logger.Warningf("Backing off for %s before attempting the next CreateCertificate() request", bi) - timer := time.NewTimer(bi) - select { - case <-timer.C: - case <-ctx.Done(): - return 0, ctx.Err() - } - } - retries++ - - privKey, err := rsa.GenerateKey(rand.Reader, p.cfg.keySize) - if err != nil { - p.logger.Warningf("RSA key generation failed: %v", err) - continue - } - // We do not set any fields in the CSR (we use an empty - // x509.CertificateRequest as the template) because the MeshCA discards - // them anyways, and uses the workload identity from the access token - // that we present (as part of the STS call creds). - csrBytes, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{}, crypto.PrivateKey(privKey)) - if err != nil { - p.logger.Warningf("CSR creation failed: %v", err) - continue - } - csrPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrBytes}) - - // Send out the CSR with a call timeout and location metadata, as - // specified in the plugin configuration. 
- req := &meshpb.MeshCertificateRequest{ - RequestId: uuid.New().String(), - Csr: string(csrPEM), - Validity: &durationpb.Duration{Seconds: int64(p.cfg.certLifetime / time.Second)}, - } - p.logger.Debugf("Sending CreateCertificate() request: %v", req) - - callCtx, ctxCancel := context.WithTimeout(context.Background(), p.cfg.callTimeout) - callCtx = metadata.NewOutgoingContext(callCtx, metadata.Pairs(locationMetadataKey, p.cfg.location)) - resp, err := client.CreateCertificate(callCtx, req) - if err != nil { - p.logger.Warningf("CreateCertificate request failed: %v", err) - ctxCancel() - continue - } - ctxCancel() - - // The returned cert chain must contain more than one cert. Leaf cert is - // element '0', while root cert is element 'n', and the intermediate - // entries form the chain from the root to the leaf. - certChain := resp.GetCertChain() - if l := len(certChain); l <= 1 { - p.logger.Errorf("Received certificate chain contains %d certificates, need more than one", l) - continue - } - - // We need to explicitly parse the PEM cert contents as an - // x509.Certificate to read the certificate validity period. We use this - // to decide when to refresh the cert. Even though the call to - // tls.X509KeyPair actually parses the PEM contents into an - // x509.Certificate, it does not store that in the `Leaf` field. See: - // https://golang.org/pkg/crypto/tls/#X509KeyPair. 
- identity, intermediates, roots, err := parseCertChain(certChain) - if err != nil { - p.logger.Errorf(err.Error()) - continue - } - _, err = identity.Verify(x509.VerifyOptions{ - Intermediates: intermediates, - Roots: roots, - }) - if err != nil { - p.logger.Errorf("Certificate verification failed for return certChain: %v", err) - continue - } - - key := x509.MarshalPKCS1PrivateKey(privKey) - keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: key}) - certPair, err := tls.X509KeyPair([]byte(certChain[0]), keyPEM) - if err != nil { - p.logger.Errorf("Failed to create x509 key pair: %v", err) - continue - } - - // At this point, the received response has been deemed good. - retries = 0 - - // All certs signed by the MeshCA roll up to the same root. And treating - // the last element of the returned chain as the root is the only - // supported option to get the root certificate. So, we ignore the - // options specified in the call to Build(), which contain certificate - // name and whether the caller is interested in identity or root cert. - p.Set(&certprovider.KeyMaterial{Certs: []tls.Certificate{certPair}, Roots: roots}, nil) - return time.Until(identity.NotAfter), nil - } -} - -// ParseCertChain parses the result returned by the MeshCA which consists of a -// list of PEM encoded certs. The first element in the list is the leaf or -// identity cert, while the last element is the root, and everything in between -// form the chain of trust. -// -// Caller needs to make sure that certChain has at least two elements. 
-func parseCertChain(certChain []string) (*x509.Certificate, *x509.CertPool, *x509.CertPool, error) { - identity, err := parseCert([]byte(certChain[0])) - if err != nil { - return nil, nil, nil, err - } - - intermediates := x509.NewCertPool() - for _, cert := range certChain[1 : len(certChain)-1] { - i, err := parseCert([]byte(cert)) - if err != nil { - return nil, nil, nil, err - } - intermediates.AddCert(i) - } - - roots := x509.NewCertPool() - root, err := parseCert([]byte(certChain[len(certChain)-1])) - if err != nil { - return nil, nil, nil, err - } - roots.AddCert(root) - - return identity, intermediates, roots, nil -} - -func parseCert(certPEM []byte) (*x509.Certificate, error) { - block, _ := pem.Decode(certPEM) - if block == nil { - return nil, fmt.Errorf("failed to decode received PEM data: %v", certPEM) - } - return x509.ParseCertificate(block.Bytes) -} diff --git a/credentials/tls/certprovider/meshca/plugin_test.go b/credentials/tls/certprovider/meshca/plugin_test.go deleted file mode 100644 index 51f545d6a0e3..000000000000 --- a/credentials/tls/certprovider/meshca/plugin_test.go +++ /dev/null @@ -1,459 +0,0 @@ -// +build go1.13 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package meshca - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net" - "reflect" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/tls/certprovider" - meshgrpc "google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1" - meshpb "google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1" - "google.golang.org/grpc/internal/testutils" -) - -const ( - // Used when waiting for something that is expected to *not* happen. - defaultTestShortTimeout = 10 * time.Millisecond - defaultTestTimeout = 5 * time.Second - defaultTestCertLife = time.Hour - shortTestCertLife = 2 * time.Second - maxErrCount = 2 -) - -// fakeCA provides a very simple fake implementation of the certificate signing -// service as exported by the MeshCA. -type fakeCA struct { - meshgrpc.UnimplementedMeshCertificateServiceServer - - withErrors bool // Whether the CA returns errors to begin with. - withShortLife bool // Whether to create certs with short lifetime - - ccChan *testutils.Channel // Channel to get notified about CreateCertificate calls. - errors int // Error count. - key *rsa.PrivateKey // Private key of CA. - cert *x509.Certificate // Signing certificate. - certPEM []byte // PEM encoding of signing certificate. -} - -// Returns a new instance of the fake Mesh CA. It generates a new RSA key and a -// self-signed certificate which will be used to sign CSRs received in incoming -// requests. -// withErrors controls whether the fake returns errors before succeeding, while -// withShortLife controls whether the fake returns certs with very small -// lifetimes (to test plugin refresh behavior). Every time a CreateCertificate() -// call succeeds, an event is pushed on the ccChan. 
-func newFakeMeshCA(ccChan *testutils.Channel, withErrors, withShortLife bool) (*fakeCA, error) { - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return nil, fmt.Errorf("RSA key generation failed: %v", err) - } - - now := time.Now() - tmpl := &x509.Certificate{ - Subject: pkix.Name{CommonName: "my-fake-ca"}, - SerialNumber: big.NewInt(10), - NotBefore: now.Add(-time.Hour), - NotAfter: now.Add(time.Hour), - KeyUsage: x509.KeyUsageCertSign, - IsCA: true, - BasicConstraintsValid: true, - } - certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key) - if err != nil { - return nil, fmt.Errorf("x509.CreateCertificate(%v) failed: %v", tmpl, err) - } - // The PEM encoding of the self-signed certificate is stored because we need - // to return a chain of certificates in the response, starting with the - // client certificate and ending in the root. - certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) - cert, err := x509.ParseCertificate(certDER) - if err != nil { - return nil, fmt.Errorf("x509.ParseCertificate(%v) failed: %v", certDER, err) - } - - return &fakeCA{ - withErrors: withErrors, - withShortLife: withShortLife, - ccChan: ccChan, - key: key, - cert: cert, - certPEM: certPEM, - }, nil -} - -// CreateCertificate helps implement the MeshCA service. -// -// If the fakeMeshCA was created with `withErrors` set to true, the first -// `maxErrCount` number of RPC return errors. Subsequent requests are signed and -// returned without error. 
-func (f *fakeCA) CreateCertificate(ctx context.Context, req *meshpb.MeshCertificateRequest) (*meshpb.MeshCertificateResponse, error) { - if f.withErrors { - if f.errors < maxErrCount { - f.errors++ - return nil, errors.New("fake Mesh CA error") - - } - } - - csrPEM := []byte(req.GetCsr()) - block, _ := pem.Decode(csrPEM) - if block == nil { - return nil, fmt.Errorf("failed to decode received CSR: %v", csrPEM) - } - csr, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - return nil, fmt.Errorf("failed to parse received CSR: %v", csrPEM) - } - - // By default, we create certs which are valid for an hour. But if - // `withShortLife` is set, we create certs which are valid only for a couple - // of seconds. - now := time.Now() - notBefore, notAfter := now.Add(-defaultTestCertLife), now.Add(defaultTestCertLife) - if f.withShortLife { - notBefore, notAfter = now.Add(-shortTestCertLife), now.Add(shortTestCertLife) - } - tmpl := &x509.Certificate{ - Subject: pkix.Name{CommonName: "signed-cert"}, - SerialNumber: big.NewInt(10), - NotBefore: notBefore, - NotAfter: notAfter, - KeyUsage: x509.KeyUsageDigitalSignature, - } - certDER, err := x509.CreateCertificate(rand.Reader, tmpl, f.cert, csr.PublicKey, f.key) - if err != nil { - return nil, fmt.Errorf("x509.CreateCertificate(%v) failed: %v", tmpl, err) - } - certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) - - // Push to ccChan to indicate that the RPC is processed. - f.ccChan.Send(nil) - - certChain := []string{ - string(certPEM), // Signed certificate corresponding to CSR - string(f.certPEM), // Root certificate - } - return &meshpb.MeshCertificateResponse{CertChain: certChain}, nil -} - -// opts wraps the options to be passed to setup. -type opts struct { - // Whether the CA returns certs with short lifetime. Used to test client refresh. - withShortLife bool - // Whether the CA returns errors to begin with. Used to test client backoff. 
- withbackoff bool -} - -// events wraps channels which indicate different events. -type events struct { - // Pushed to when the plugin dials the MeshCA. - dialDone *testutils.Channel - // Pushed to when CreateCertifcate() succeeds on the MeshCA. - createCertDone *testutils.Channel - // Pushed to when the plugin updates the distributor with new key material. - keyMaterialDone *testutils.Channel - // Pushed to when the client backs off after a failed CreateCertificate(). - backoffDone *testutils.Channel -} - -// setup performs tasks common to all tests in this file. -func setup(t *testing.T, o opts) (events, string, func()) { - t.Helper() - - // Create a fake MeshCA which pushes events on the passed channel for - // successful RPCs. - createCertDone := testutils.NewChannel() - fs, err := newFakeMeshCA(createCertDone, o.withbackoff, o.withShortLife) - if err != nil { - t.Fatal(err) - } - - // Create a gRPC server and register the fake MeshCA on it. - server := grpc.NewServer() - meshgrpc.RegisterMeshCertificateServiceServer(server, fs) - - // Start a net.Listener on a local port, and pass it to the gRPC server - // created above and start serving. - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal(err) - } - addr := lis.Addr().String() - go server.Serve(lis) - - // Override the plugin's dial function and perform a blocking dial. Also - // push on dialDone once the dial is complete so that test can block on this - // event before verifying other things. 
- dialDone := testutils.NewChannel() - origDialFunc := grpcDialFunc - grpcDialFunc = func(uri string, _ ...grpc.DialOption) (*grpc.ClientConn, error) { - if uri != addr { - t.Fatalf("plugin dialing MeshCA at %s, want %s", uri, addr) - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - cc, err := grpc.DialContext(ctx, uri, grpc.WithInsecure(), grpc.WithBlock()) - if err != nil { - t.Fatalf("grpc.DialContext(%s) failed: %v", addr, err) - } - dialDone.Send(nil) - return cc, nil - } - - // Override the plugin's newDistributorFunc and return a wrappedDistributor - // which allows the test to be notified whenever the plugin pushes new key - // material into the distributor. - origDistributorFunc := newDistributorFunc - keyMaterialDone := testutils.NewChannel() - d := newWrappedDistributor(keyMaterialDone) - newDistributorFunc = func() distributor { return d } - - // Override the plugin's backoff function to perform no real backoff, but - // push on a channel so that the test can verifiy that backoff actually - // happened. - backoffDone := testutils.NewChannelWithSize(maxErrCount) - origBackoffFunc := backoffFunc - if o.withbackoff { - // Override the plugin's backoff function with this, so that we can verify - // that a backoff actually was triggered. - backoffFunc = func(v int) time.Duration { - backoffDone.Send(v) - return 0 - } - } - - // Return all the channels, and a cancel function to undo all the overrides. - e := events{ - dialDone: dialDone, - createCertDone: createCertDone, - keyMaterialDone: keyMaterialDone, - backoffDone: backoffDone, - } - done := func() { - server.Stop() - grpcDialFunc = origDialFunc - newDistributorFunc = origDistributorFunc - backoffFunc = origBackoffFunc - } - return e, addr, done -} - -// wrappedDistributor wraps a distributor and pushes on a channel whenever new -// key material is pushed to the distributor. 
-type wrappedDistributor struct { - *certprovider.Distributor - kmChan *testutils.Channel -} - -func newWrappedDistributor(kmChan *testutils.Channel) *wrappedDistributor { - return &wrappedDistributor{ - kmChan: kmChan, - Distributor: certprovider.NewDistributor(), - } -} - -func (wd *wrappedDistributor) Set(km *certprovider.KeyMaterial, err error) { - wd.Distributor.Set(km, err) - wd.kmChan.Send(nil) -} - -// TestCreateCertificate verifies the simple case where the MeshCA server -// returns a good certificate. -func (s) TestCreateCertificate(t *testing.T) { - e, addr, cancel := setup(t, opts{}) - defer cancel() - - // Set the MeshCA targetURI to point to our fake MeshCA. - inputConfig := json.RawMessage(fmt.Sprintf(goodConfigFormatStr, addr)) - - // Lookup MeshCA plugin builder, parse config and start the plugin. - prov, err := certprovider.GetProvider(pluginName, inputConfig, certprovider.BuildOptions{}) - if err != nil { - t.Fatalf("GetProvider(%s, %s) failed: %v", pluginName, string(inputConfig), err) - } - defer prov.Close() - - // Wait till the plugin dials the MeshCA server. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.dialDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to dial MeshCA") - } - - // Wait till the plugin makes a CreateCertificate() call. - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.createCertDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to make CreateCertificate RPC") - } - - // We don't really care about the exact key material returned here. All we - // care about is whether we get any key material at all, and that we don't - // get any errors. 
- ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err = prov.KeyMaterial(ctx); err != nil { - t.Fatalf("provider.KeyMaterial(ctx) failed: %v", err) - } -} - -// TestCreateCertificateWithBackoff verifies the case where the MeshCA server -// returns errors initially and then returns a good certificate. The test makes -// sure that the client backs off when the server returns errors. -func (s) TestCreateCertificateWithBackoff(t *testing.T) { - e, addr, cancel := setup(t, opts{withbackoff: true}) - defer cancel() - - // Set the MeshCA targetURI to point to our fake MeshCA. - inputConfig := json.RawMessage(fmt.Sprintf(goodConfigFormatStr, addr)) - - // Lookup MeshCA plugin builder, parse config and start the plugin. - prov, err := certprovider.GetProvider(pluginName, inputConfig, certprovider.BuildOptions{}) - if err != nil { - t.Fatalf("GetProvider(%s, %s) failed: %v", pluginName, string(inputConfig), err) - } - defer prov.Close() - - // Wait till the plugin dials the MeshCA server. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.dialDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to dial MeshCA") - } - - // Making the CreateCertificateRPC involves generating the keys, creating - // the CSR etc which seem to take reasonable amount of time. And in this - // test, the first two attempts will fail. Hence we give it a reasonable - // deadline here. - ctx, cancel = context.WithTimeout(context.Background(), 3*defaultTestTimeout) - defer cancel() - if _, err := e.createCertDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to make CreateCertificate RPC") - } - - // The first `maxErrCount` calls to CreateCertificate end in failure, and - // should lead to a backoff. 
- for i := 0; i < maxErrCount; i++ { - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.backoffDone.Receive(ctx); err != nil { - t.Fatalf("plugin failed to backoff after error from fake server: %v", err) - } - } - - // We don't really care about the exact key material returned here. All we - // care about is whether we get any key material at all, and that we don't - // get any errors. - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err = prov.KeyMaterial(ctx); err != nil { - t.Fatalf("provider.KeyMaterial(ctx) failed: %v", err) - } -} - -// TestCreateCertificateWithRefresh verifies the case where the MeshCA returns a -// certificate with a really short lifetime, and makes sure that the plugin -// refreshes the cert in time. -func (s) TestCreateCertificateWithRefresh(t *testing.T) { - e, addr, cancel := setup(t, opts{withShortLife: true}) - defer cancel() - - // Set the MeshCA targetURI to point to our fake MeshCA. - inputConfig := json.RawMessage(fmt.Sprintf(goodConfigFormatStr, addr)) - - // Lookup MeshCA plugin builder, parse config and start the plugin. - prov, err := certprovider.GetProvider(pluginName, inputConfig, certprovider.BuildOptions{}) - if err != nil { - t.Fatalf("GetProvider(%s, %s) failed: %v", pluginName, string(inputConfig), err) - } - defer prov.Close() - - // Wait till the plugin dials the MeshCA server. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.dialDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to dial MeshCA") - } - - // Wait till the plugin makes a CreateCertificate() call. 
- ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := e.createCertDone.Receive(ctx); err != nil { - t.Fatal("timeout waiting for plugin to make CreateCertificate RPC") - } - - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - km1, err := prov.KeyMaterial(ctx) - if err != nil { - t.Fatalf("provider.KeyMaterial(ctx) failed: %v", err) - } - - // At this point, we have read the first key material, and since the - // returned key material has a really short validity period, we expect the - // key material to be refreshed quite soon. We drain the channel on which - // the event corresponding to setting of new key material is pushed. This - // enables us to block on the same channel, waiting for refreshed key - // material. - // Since we do not expect this call to block, it is OK to pass the - // background context. - e.keyMaterialDone.Receive(context.Background()) - - // Wait for the next call to CreateCertificate() to refresh the certificate - // returned earlier. - ctx, cancel = context.WithTimeout(context.Background(), 2*shortTestCertLife) - defer cancel() - if _, err := e.keyMaterialDone.Receive(ctx); err != nil { - t.Fatalf("CreateCertificate() RPC not made: %v", err) - } - - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - km2, err := prov.KeyMaterial(ctx) - if err != nil { - t.Fatalf("provider.KeyMaterial(ctx) failed: %v", err) - } - - // TODO(easwars): Remove all references to reflect.DeepEqual and use - // cmp.Equal instead. Currently, the later panics because x509.Certificate - // type defines an Equal method, but does not check for nil. This has been - // fixed in - // https://github.com/golang/go/commit/89865f8ba64ccb27f439cce6daaa37c9aa38f351, - // but this is only available starting go1.14. So, once we remove support - // for go1.13, we can make the switch. 
- if reflect.DeepEqual(km1, km2) { - t.Error("certificate refresh did not happen in the background") - } -} diff --git a/credentials/tls/certprovider/pemfile/watcher.go b/credentials/tls/certprovider/pemfile/watcher.go index e154030cbe8a..7ed5c53ba404 100644 --- a/credentials/tls/certprovider/pemfile/watcher.go +++ b/credentials/tls/certprovider/pemfile/watcher.go @@ -19,7 +19,7 @@ // Package pemfile provides a file watching certificate provider plugin // implementation which works for files with PEM contents. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. @@ -32,7 +32,7 @@ import ( "crypto/x509" "errors" "fmt" - "io/ioutil" + "os" "path/filepath" "time" @@ -154,12 +154,12 @@ func (w *watcher) updateIdentityDistributor() { return } - certFileContents, err := ioutil.ReadFile(w.opts.CertFile) + certFileContents, err := os.ReadFile(w.opts.CertFile) if err != nil { logger.Warningf("certFile (%s) read failed: %v", w.opts.CertFile, err) return } - keyFileContents, err := ioutil.ReadFile(w.opts.KeyFile) + keyFileContents, err := os.ReadFile(w.opts.KeyFile) if err != nil { logger.Warningf("keyFile (%s) read failed: %v", w.opts.KeyFile, err) return @@ -191,7 +191,7 @@ func (w *watcher) updateRootDistributor() { return } - rootFileContents, err := ioutil.ReadFile(w.opts.RootFile) + rootFileContents, err := os.ReadFile(w.opts.RootFile) if err != nil { logger.Warningf("rootFile (%s) read failed: %v", w.opts.RootFile, err) return diff --git a/credentials/tls/certprovider/pemfile/watcher_test.go b/credentials/tls/certprovider/pemfile/watcher_test.go index e43cf7358eca..521f762d3a41 100644 --- a/credentials/tls/certprovider/pemfile/watcher_test.go +++ b/credentials/tls/certprovider/pemfile/watcher_test.go @@ -21,8 +21,6 @@ package pemfile import ( "context" "fmt" - "io/ioutil" - "math/big" "os" "path" "testing" @@ -30,7 +28,6 @@ import ( "github.com/google/go-cmp/cmp" 
"github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" @@ -57,17 +54,15 @@ func Test(t *testing.T) { } func compareKeyMaterial(got, want *certprovider.KeyMaterial) error { - // x509.Certificate type defines an Equal() method, but does not check for - // nil. This has been fixed in - // https://github.com/golang/go/commit/89865f8ba64ccb27f439cce6daaa37c9aa38f351, - // but this is only available starting go1.14. - // TODO(easwars): Remove this check once we remove support for go1.13. - if (got.Certs == nil && want.Certs != nil) || (want.Certs == nil && got.Certs != nil) { + if len(got.Certs) != len(want.Certs) { return fmt.Errorf("keyMaterial certs = %+v, want %+v", got, want) } - if !cmp.Equal(got.Certs, want.Certs, cmp.AllowUnexported(big.Int{})) { - return fmt.Errorf("keyMaterial certs = %+v, want %+v", got, want) + for i := 0; i < len(got.Certs); i++ { + if !got.Certs[i].Leaf.Equal(want.Certs[i].Leaf) { + return fmt.Errorf("keyMaterial certs = %+v, want %+v", got, want) + } } + // x509.CertPool contains only unexported fields some of which contain other // unexported fields. So usage of cmp.AllowUnexported() or // cmpopts.IgnoreUnexported() does not help us much here. 
Also, the standard @@ -165,12 +160,12 @@ func (wd *wrappedDistributor) Set(km *certprovider.KeyMaterial, err error) { func createTmpFile(t *testing.T, src, dst string) { t.Helper() - data, err := ioutil.ReadFile(src) + data, err := os.ReadFile(src) if err != nil { - t.Fatalf("ioutil.ReadFile(%q) failed: %v", src, err) + t.Fatalf("os.ReadFile(%q) failed: %v", src, err) } - if err := ioutil.WriteFile(dst, data, os.ModePerm); err != nil { - t.Fatalf("ioutil.WriteFile(%q) failed: %v", dst, err) + if err := os.WriteFile(dst, data, os.ModePerm); err != nil { + t.Fatalf("os.WriteFile(%q) failed: %v", dst, err) } t.Logf("Wrote file at: %s", dst) t.Logf("%s", string(data)) @@ -185,9 +180,9 @@ func createTmpDirWithFiles(t *testing.T, dirSuffix, certSrc, keySrc, rootSrc str // Create a temp directory. Passing an empty string for the first argument // uses the system temp directory. - dir, err := ioutil.TempDir("", dirSuffix) + dir, err := os.MkdirTemp("", dirSuffix) if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) + t.Fatalf("os.MkdirTemp() failed: %v", err) } t.Logf("Using tmpdir: %s", dir) @@ -327,9 +322,9 @@ func (s) TestProvider_UpdateSuccessWithSymlink(t *testing.T) { dir2 := createTmpDirWithFiles(t, "update_with_symlink2_*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/server_ca_cert.pem") // Create a symlink under a new tempdir, and make it point to dir1. 
- tmpdir, err := ioutil.TempDir("", "test_symlink_*") + tmpdir, err := os.MkdirTemp("", "test_symlink_*") if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) + t.Fatalf("os.MkdirTemp() failed: %v", err) } symLinkName := path.Join(tmpdir, "test_symlink") if err := os.Symlink(dir1, symLinkName); err != nil { diff --git a/credentials/tls/certprovider/provider.go b/credentials/tls/certprovider/provider.go index 275c176afdf8..f24df7c5008b 100644 --- a/credentials/tls/certprovider/provider.go +++ b/credentials/tls/certprovider/provider.go @@ -18,7 +18,7 @@ // Package certprovider defines APIs for Certificate Providers in gRPC. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. diff --git a/credentials/tls/certprovider/store_test.go b/credentials/tls/certprovider/store_test.go index 00d33a2be872..54384e8225ef 100644 --- a/credentials/tls/certprovider/store_test.go +++ b/credentials/tls/certprovider/store_test.go @@ -1,5 +1,3 @@ -// +build go1.13 - /* * * Copyright 2020 gRPC authors. @@ -26,11 +24,12 @@ import ( "crypto/x509" "errors" "fmt" - "io/ioutil" - "reflect" + "os" "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/testdata" @@ -127,7 +126,7 @@ func loadKeyMaterials(t *testing.T, cert, key, ca string) *KeyMaterial { t.Fatalf("Failed to load keyPair: %v", err) } - pemData, err := ioutil.ReadFile(testdata.Path(ca)) + pemData, err := os.ReadFile(testdata.Path(ca)) if err != nil { t.Fatal(err) } @@ -154,15 +153,23 @@ func readAndVerifyKeyMaterial(ctx context.Context, kmr kmReader, wantKM *KeyMate } func compareKeyMaterial(got, want *KeyMaterial) error { - // TODO(easwars): Remove all references to reflect.DeepEqual and use - // cmp.Equal instead. 
Currently, the later panics because x509.Certificate - // type defines an Equal method, but does not check for nil. This has been - // fixed in - // https://github.com/golang/go/commit/89865f8ba64ccb27f439cce6daaa37c9aa38f351, - // but this is only available starting go1.14. So, once we remove support - // for go1.13, we can make the switch. - if !reflect.DeepEqual(got, want) { - return fmt.Errorf("provider.KeyMaterial() = %+v, want %+v", got, want) + if len(got.Certs) != len(want.Certs) { + return fmt.Errorf("keyMaterial certs = %+v, want %+v", got, want) + } + for i := 0; i < len(got.Certs); i++ { + if !got.Certs[i].Leaf.Equal(want.Certs[i].Leaf) { + return fmt.Errorf("keyMaterial certs = %+v, want %+v", got, want) + } + } + + // x509.CertPool contains only unexported fields some of which contain other + // unexported fields. So usage of cmp.AllowUnexported() or + // cmpopts.IgnoreUnexported() does not help us much here. Also, the standard + // library does not provide a way to compare CertPool values. Comparing the + // subjects field of the certs in the CertPool seems like a reasonable + // approach. + if gotR, wantR := got.Roots.Subjects(), want.Roots.Subjects(); !cmp.Equal(gotR, wantR, cmpopts.EquateEmpty()) { + return fmt.Errorf("keyMaterial roots = %v, want %v", gotR, wantR) } return nil } @@ -335,7 +342,7 @@ func (s) TestStoreSingleProviderDifferentConfigs(t *testing.T) { t.Fatal(err) } - // Push new key material into only one of the fake providers and and verify + // Push new key material into only one of the fake providers and verify // that the providers returned by the store return the appropriate key // material. 
km2 := loadKeyMaterials(t, "x509/server2_cert.pem", "x509/server2_key.pem", "x509/client_ca_cert.pem") diff --git a/credentials/xds/xds.go b/credentials/xds/xds.go index 0243009df644..d232e6786746 100644 --- a/credentials/xds/xds.go +++ b/credentials/xds/xds.go @@ -18,11 +18,6 @@ // Package xds provides a transport credentials implementation where the // security configuration is pushed by a management server using xDS APIs. -// -// Experimental -// -// Notice: All APIs in this package are EXPERIMENTAL and may be removed in a -// later release. package xds import ( @@ -167,8 +162,10 @@ func (c *credsImpl) ClientHandshake(ctx context.Context, authority string, rawCo } // The SANs sent by the MeshCA are encoded as SPIFFE IDs. We need to // only look at the SANs on the leaf cert. - if !hi.MatchingSANExists(certs[0]) { - return fmt.Errorf("SANs received in leaf certificate %+v does not match any of the accepted SANs", certs[0]) + if cert := certs[0]; !hi.MatchingSANExists(cert) { + // TODO: Print the complete certificate once the x509 package + // supports a String() method on the Certificate type. 
+ return fmt.Errorf("xds: received SANs {DNSNames: %v, EmailAddresses: %v, IPAddresses: %v, URIs: %v} do not match any of the accepted SANs", cert.DNSNames, cert.EmailAddresses, cert.IPAddresses, cert.URIs) } return nil } diff --git a/credentials/xds/xds_client_test.go b/credentials/xds/xds_client_test.go index 82cfa5876acb..2fd2e21cdd73 100644 --- a/credentials/xds/xds_client_test.go +++ b/credentials/xds/xds_client_test.go @@ -24,19 +24,19 @@ import ( "crypto/x509" "errors" "fmt" - "io/ioutil" "net" + "os" "strings" "testing" "time" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/tls/certprovider" - "google.golang.org/grpc/internal" + icredentials "google.golang.org/grpc/internal/credentials" xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" "google.golang.org/grpc/testdata" ) @@ -159,7 +159,7 @@ func testServerMutualTLSHandshake(rawConn net.Conn) handshakeResult { if err != nil { return handshakeResult{err: err} } - pemData, err := ioutil.ReadFile(testdata.Path("x509/client_ca_cert.pem")) + pemData, err := os.ReadFile(testdata.Path("x509/client_ca_cert.pem")) if err != nil { return handshakeResult{err: err} } @@ -204,7 +204,7 @@ func makeIdentityProvider(t *testing.T, certPath, keyPath string) certprovider.P // makeRootProvider creates a new instance of the fakeProvider returning the // root key material specified in the provider file paths. func makeRootProvider(t *testing.T, caPath string) *fakeProvider { - pemData, err := ioutil.ReadFile(testdata.Path(caPath)) + pemData, err := os.ReadFile(testdata.Path(caPath)) if err != nil { t.Fatal(err) } @@ -221,15 +221,14 @@ func newTestContextWithHandshakeInfo(parent context.Context, root, identity cert // NewSubConn(). 
info := xdsinternal.NewHandshakeInfo(root, identity) if sanExactMatch != "" { - info.SetSANMatchers([]xds.StringMatcher{xds.StringMatcherForTesting(newStringP(sanExactMatch), nil, nil, nil, nil, false)}) + info.SetSANMatchers([]matcher.StringMatcher{matcher.StringMatcherForTesting(newStringP(sanExactMatch), nil, nil, nil, nil, false)}) } addr := xdsinternal.SetHandshakeInfo(resolver.Address{}, info) // Moving the attributes from the resolver.Address to the context passed to // the handshaker is done in the transport layer. Since we directly call the // handshaker in these tests, we need to do the same here. - contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) - return contextWithHandshakeInfo(parent, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + return icredentials.NewClientHandshakeInfoContext(parent, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) } // compareAuthInfo compares the AuthInfo received on the client side after a @@ -477,7 +476,7 @@ func (s) TestClientCredsHandshakeFailure(t *testing.T) { handshakeFunc: testServerTLSHandshake, rootProvider: makeRootProvider(t, "x509/server_ca_cert.pem"), san: "bad-san", - wantErr: "does not match any of the accepted SANs", + wantErr: "do not match any of the accepted SANs", }, } @@ -535,14 +534,13 @@ func (s) TestClientCredsProviderSwitch(t *testing.T) { // use the correct trust roots. 
root1 := makeRootProvider(t, "x509/client_ca_cert.pem") handshakeInfo := xdsinternal.NewHandshakeInfo(root1, nil) - handshakeInfo.SetSANMatchers([]xds.StringMatcher{xds.StringMatcherForTesting(newStringP(defaultTestCertSAN), nil, nil, nil, nil, false)}) + handshakeInfo.SetSANMatchers([]matcher.StringMatcher{matcher.StringMatcherForTesting(newStringP(defaultTestCertSAN), nil, nil, nil, nil, false)}) // We need to repeat most of what newTestContextWithHandshakeInfo() does // here because we need access to the underlying HandshakeInfo so that we // can update it before the next call to ClientHandshake(). addr := xdsinternal.SetHandshakeInfo(resolver.Address{}, handshakeInfo) - contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) - ctx = contextWithHandshakeInfo(ctx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + ctx = icredentials.NewClientHandshakeInfoContext(ctx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) if _, _, err := creds.ClientHandshake(ctx, authority, conn); err == nil { t.Fatal("ClientHandshake() succeeded when expected to fail") } diff --git a/credentials/xds/xds_server_test.go b/credentials/xds/xds_server_test.go index 5c29ba38c286..bc32a04e69a1 100644 --- a/credentials/xds/xds_server_test.go +++ b/credentials/xds/xds_server_test.go @@ -24,8 +24,8 @@ import ( "crypto/x509" "errors" "fmt" - "io/ioutil" "net" + "os" "strings" "testing" "time" @@ -39,7 +39,7 @@ import ( func makeClientTLSConfig(t *testing.T, mTLS bool) *tls.Config { t.Helper() - pemData, err := ioutil.ReadFile(testdata.Path("x509/server_ca_cert.pem")) + pemData, err := os.ReadFile(testdata.Path("x509/server_ca_cert.pem")) if err != nil { t.Fatal(err) } diff --git a/default_dial_option_server_option_test.go b/default_dial_option_server_option_test.go new file mode 100644 index 000000000000..4af5c0b8772c --- /dev/null +++ b/default_dial_option_server_option_test.go @@ -0,0 
+1,134 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "strings" + "testing" + + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" +) + +func (s) TestAddGlobalDialOptions(t *testing.T) { + // Ensure the Dial fails without credentials + if _, err := Dial("fake"); err == nil { + t.Fatalf("Dialing without a credential did not fail") + } else { + if !strings.Contains(err.Error(), "no transport security set") { + t.Fatalf("Dialing failed with unexpected error: %v", err) + } + } + + // Set and check the DialOptions + opts := []DialOption{WithTransportCredentials(insecure.NewCredentials()), WithTransportCredentials(insecure.NewCredentials()), WithTransportCredentials(insecure.NewCredentials())} + internal.AddGlobalDialOptions.(func(opt ...DialOption))(opts...) 
+ for i, opt := range opts { + if globalDialOptions[i] != opt { + t.Fatalf("Unexpected global dial option at index %d: %v != %v", i, globalDialOptions[i], opt) + } + } + + // Ensure the Dial passes with the extra dial options + if cc, err := Dial("fake"); err != nil { + t.Fatalf("Dialing with insecure credential failed: %v", err) + } else { + cc.Close() + } + + internal.ClearGlobalDialOptions() + if len(globalDialOptions) != 0 { + t.Fatalf("Unexpected len of globalDialOptions: %d != 0", len(globalDialOptions)) + } +} + +// TestDisableGlobalOptions tests dialing with the disableGlobalDialOptions dial +// option. Dialing with this set should not pick up global options. +func (s) TestDisableGlobalOptions(t *testing.T) { + // Set transport credentials as a global option. + internal.AddGlobalDialOptions.(func(opt ...DialOption))(WithTransportCredentials(insecure.NewCredentials())) + // Dial with the disable global options dial option. This dial should fail + // due to the global dial options with credentials not being picked up due + // to global options being disabled. + noTSecStr := "no transport security set" + if _, err := Dial("fake", internal.DisableGlobalDialOptions.(func() DialOption)()); !strings.Contains(fmt.Sprint(err), noTSecStr) { + t.Fatalf("Dialing received unexpected error: %v, want error containing \"%v\"", err, noTSecStr) + } + internal.ClearGlobalDialOptions() +} + +func (s) TestAddGlobalServerOptions(t *testing.T) { + const maxRecvSize = 998765 + // Set and check the ServerOptions + opts := []ServerOption{Creds(insecure.NewCredentials()), MaxRecvMsgSize(maxRecvSize)} + internal.AddGlobalServerOptions.(func(opt ...ServerOption))(opts...) 
+ for i, opt := range opts { + if globalServerOptions[i] != opt { + t.Fatalf("Unexpected global server option at index %d: %v != %v", i, globalServerOptions[i], opt) + } + } + + // Ensure the extra server options apply to new servers + s := NewServer() + if s.opts.maxReceiveMessageSize != maxRecvSize { + t.Fatalf("Unexpected s.opts.maxReceiveMessageSize: %d != %d", s.opts.maxReceiveMessageSize, maxRecvSize) + } + + internal.ClearGlobalServerOptions() + if len(globalServerOptions) != 0 { + t.Fatalf("Unexpected len of globalServerOptions: %d != 0", len(globalServerOptions)) + } +} + +// TestJoinDialOption tests the join dial option. It configures a joined dial +// option with three individual dial options, and verifies that all three are +// successfully applied. +func (s) TestJoinDialOption(t *testing.T) { + const maxRecvSize = 998765 + const initialWindowSize = 100 + jdo := newJoinDialOption(WithTransportCredentials(insecure.NewCredentials()), WithReadBufferSize(maxRecvSize), WithInitialWindowSize(initialWindowSize)) + cc, err := Dial("fake", jdo) + if err != nil { + t.Fatalf("Dialing with insecure credentials failed: %v", err) + } + defer cc.Close() + if cc.dopts.copts.ReadBufferSize != maxRecvSize { + t.Fatalf("Unexpected cc.dopts.copts.ReadBufferSize: %d != %d", cc.dopts.copts.ReadBufferSize, maxRecvSize) + } + if cc.dopts.copts.InitialWindowSize != initialWindowSize { + t.Fatalf("Unexpected cc.dopts.copts.InitialWindowSize: %d != %d", cc.dopts.copts.InitialWindowSize, initialWindowSize) + } +} + +// TestJoinServerOption tests the join server option. It configures a joined +// server option with three individual server options, and verifies that all +// three are successfully applied. 
+func (s) TestJoinServerOption(t *testing.T) { + const maxRecvSize = 998765 + const initialWindowSize = 100 + jso := newJoinServerOption(Creds(insecure.NewCredentials()), MaxRecvMsgSize(maxRecvSize), InitialWindowSize(initialWindowSize)) + s := NewServer(jso) + if s.opts.maxReceiveMessageSize != maxRecvSize { + t.Fatalf("Unexpected s.opts.maxReceiveMessageSize: %d != %d", s.opts.maxReceiveMessageSize, maxRecvSize) + } + if s.opts.initialWindowSize != initialWindowSize { + t.Fatalf("Unexpected s.opts.initialWindowSize: %d != %d", s.opts.initialWindowSize, initialWindowSize) + } +} diff --git a/dialoptions.go b/dialoptions.go index e7f86e6d7c81..23ea95237ea0 100644 --- a/dialoptions.go +++ b/dialoptions.go @@ -20,22 +20,34 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" internalbackoff "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + globalDialOptions = append(globalDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + globalDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions +} + // dialOptions configure a Dial call. dialOptions are set by the DialOption // values passed to Dial. 
type dialOptions struct { @@ -45,20 +57,18 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -66,11 +76,9 @@ type dialOptions struct { minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string - // This is used by ccResolverWrapper to backoff between successive calls to - // resolver.ResolveNow(). The user will have no need to configure this, but - // we need to be able to configure this in tests. - resolveNowBackoff func(int) time.Duration - resolvers []resolver.Builder + resolvers []resolver.Builder + idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. @@ -78,10 +86,12 @@ type DialOption interface { apply(*dialOptions) } +var globalDialOptions []DialOption + // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -89,6 +99,16 @@ type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. type funcDialOption struct { @@ -105,13 +125,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -121,8 +156,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. 
Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s @@ -200,25 +236,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -232,18 +249,14 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { }) } -// WithConnectParams configures the dialer to use the provided ConnectParams. +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. // // The backoff configuration specified as part of the ConnectParams overrides // all defaults specified in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider // using the backoff.DefaultConfig as a base, in cases where you want to // override only a subset of the backoff configuration. 
-// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func WithConnectParams(p ConnectParams) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = internalbackoff.Exponential{Config: p.Backoff} @@ -281,9 +294,12 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting the server happens in background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -295,7 +311,10 @@ func WithBlock() DialOption { // the context.DeadlineExceeded error. // Implies WithBlock() // -// Experimental +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -307,18 +326,24 @@ func WithReturnConnectionError() DialOption { } // WithInsecure returns a DialOption which disables transport security for this -// ClientConn. Note that transport security is required unless WithInsecure is -// set. +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. 
func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.insecure = true + o.copts.TransportCredentials = insecure.NewCredentials() }) } // WithNoProxy returns a DialOption which disables the use of proxies for this // ClientConn. This is ignored if WithDialer or WithContextDialer are used. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -349,7 +374,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { // the ClientConn.WithCreds. This should not be used together with // WithTransportCredentials. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -405,7 +430,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { // all the RPCs and underlying network connections in this ClientConn. func WithStatsHandler(h stats.Handler) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.StatsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl }) } @@ -417,7 +456,10 @@ func WithStatsHandler(h stats.Handler) DialOption { // FailOnNonTempDialError only affects the initial dial, and does not do // anything useful unless you are also using WithBlock(). // -// Experimental +// Use of this feature is not recommended. 
For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -486,8 +528,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt } // WithAuthority returns a DialOption that specifies the value to be used as the -// :authority pseudo-header. This value only works with WithInsecure and has no -// effect if TransportCredentials are present. +// :authority pseudo-header and as the server name in authentication handshake. func WithAuthority(a string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.authority = a @@ -498,11 +539,11 @@ func WithAuthority(a string) DialOption { // current ClientConn's parent. This function is used in nested channel creation // (e.g. grpclb dial). // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) @@ -523,14 +564,16 @@ func WithDisableServiceConfig() DialOption { // WithDefaultServiceConfig returns a DialOption that configures the default // service config, which will be used in cases where: // -// 1. WithDisableServiceConfig is also used. -// 2. Resolver does not return a service config or if the resolver returns an -// invalid service config. +// 1. WithDisableServiceConfig is also used, or // -// Experimental +// 2. The name resolver does not provide a service config or provides an +// invalid service config. // -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// The parameter s is the JSON representation of the default service config. 
+// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go func WithDefaultServiceConfig(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { o.defaultServiceConfigRawJSON = &s @@ -541,15 +584,6 @@ func WithDefaultServiceConfig(s string) DialOption { // service config enables them. This does not impact transparent retries, which // will happen automatically if no data is written to the wire or if the RPC is // unprocessed by the remote server. -// -// Retry support is currently disabled by default, but will be enabled by -// default in the future. Until then, it may be enabled by setting the -// environment variable "GRPC_GO_RETRY" to "on". -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func WithDisableRetry() DialOption { return newFuncDialOption(func(o *dialOptions) { o.disableRetry = true @@ -567,7 +601,7 @@ func WithMaxHeaderListSize(s uint32) DialOption { // WithDisableHealthCheck disables the LB channel health checking for all // SubConns of this ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -589,14 +623,13 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - disableRetry: !envconfig.Retry, healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, UseProxy: true, }, - resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, + recvBufferPool: nopBufferPool{}, } } @@ -611,22 +644,12 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { }) } -// withResolveNowBackoff specifies the function that clientconn uses to backoff -// between successive calls to resolver.ResolveNow(). -// -// For testing purpose only. -func withResolveNowBackoff(f func(int) time.Duration) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolveNowBackoff = f - }) -} - // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. They will be matched against the scheme used for the // current Dial only, and will take precedence over the global registry. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -635,3 +658,44 @@ func WithResolvers(rs ...resolver.Builder) DialOption { o.resolvers = append(o.resolvers, rs...) }) } + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. +// +// By default this feature is disabled, which can also be explicitly configured +// by passing zero to this function. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/encoding/encoding.go b/encoding/encoding.go index 6d84f74c7d08..07a5861352a6 100644 --- a/encoding/encoding.go +++ b/encoding/encoding.go @@ -19,7 +19,7 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. @@ -28,6 +28,8 @@ package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -73,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. 
func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } } // GetCompressor returns Compressor for the given compressor name. @@ -108,7 +113,7 @@ var registeredCodecs = make(map[string]Codec) // more details. // // NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple Compressors are +// an init() function), and is not thread-safe. If multiple Codecs are // registered with the same name, the one registered last will take effect. func RegisterCodec(codec Codec) { if codec == nil { diff --git a/encoding/encoding_test.go b/encoding/encoding_test.go new file mode 100644 index 000000000000..38c31dcdddcc --- /dev/null +++ b/encoding/encoding_test.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package encoding + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/grpcutil" +) + +type mockNamedCompressor struct { + Compressor +} + +func (mockNamedCompressor) Name() string { + return "mock-compressor" +} + +func TestDuplicateCompressorRegister(t *testing.T) { + defer func(m map[string]Compressor) { registeredCompressor = m }(registeredCompressor) + defer func(c []string) { grpcutil.RegisteredCompressorNames = c }(grpcutil.RegisteredCompressorNames) + registeredCompressor = map[string]Compressor{} + grpcutil.RegisteredCompressorNames = []string{} + + RegisterCompressor(&mockNamedCompressor{}) + + // Register another instance of the same compressor. + mc := &mockNamedCompressor{} + RegisterCompressor(mc) + if got := registeredCompressor["mock-compressor"]; got != mc { + t.Fatalf("Unexpected compressor, got: %+v, want:%+v", got, mc) + } + + wantNames := []string{"mock-compressor"} + if !cmp.Equal(wantNames, grpcutil.RegisteredCompressorNames) { + t.Fatalf("Unexpected compressor names, got: %+v, want:%+v", grpcutil.RegisteredCompressorNames, wantNames) + } +} diff --git a/encoding/gzip/gzip.go b/encoding/gzip/gzip.go index ce2f15ed288f..a3bb173c24ac 100644 --- a/encoding/gzip/gzip.go +++ b/encoding/gzip/gzip.go @@ -19,7 +19,7 @@ // Package gzip implements and registers the gzip compressor // during the initialization. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -30,7 +30,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "sync" "google.golang.org/grpc/encoding" @@ -42,7 +41,7 @@ const Name = "gzip" func init() { c := &compressor{} c.poolCompressor.New = func() interface{} { - return &writer{Writer: gzip.NewWriter(ioutil.Discard), pool: &c.poolCompressor} + return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} } encoding.RegisterCompressor(c) } @@ -63,7 +62,7 @@ func SetLevel(level int) error { } c := encoding.GetCompressor(Name).(*compressor) c.poolCompressor.New = func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } diff --git a/examples/README.md b/examples/README.md index bb2138f26ffb..2a5c88cd1cbe 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,29 +1,10 @@ -# gRPC Hello World +# Examples -Follow these setup to run the [quick start][] example: +The following examples are provided to help users get started with gRPC-Go. +They are arranged as follows: - 1. Get the code: +* `helloworld` - a simple example showing a basic client and server +* `routeguide` - a more complicated example showing different types of streaming RPCs +* `features` - a collection of examples, each focused on a single gRPC feature - ```console - $ go get google.golang.org/grpc/examples/helloworld/greeter_client - $ go get google.golang.org/grpc/examples/helloworld/greeter_server - ``` - - 2. Run the server: - - ```console - $ $(go env GOPATH)/bin/greeter_server & - ``` - - 3. Run the client: - - ```console - $ $(go env GOPATH)/bin/greeter_client - Greeting: Hello world - ``` - -For more details (including instructions for making a small change to the -example code) or if you're having trouble running this example, see [Quick -Start][]. - -[quick start]: https://grpc.io/docs/languages/go/quickstart +`data` is a directory containing data used by the examples, e.g. TLS certificates. 
diff --git a/examples/data/x509/README.md b/examples/data/x509/README.md new file mode 100644 index 000000000000..3b9a05dac364 --- /dev/null +++ b/examples/data/x509/README.md @@ -0,0 +1,6 @@ +This directory contains x509 certificates and associated private keys used in +examples. + +How were these test certs/keys generated ? +------------------------------------------ +Run `./create.sh` diff --git a/examples/data/x509/ca_cert.pem b/examples/data/x509/ca_cert.pem index eee033e8cb05..868a01eb92f9 100644 --- a/examples/data/x509/ca_cert.pem +++ b/examples/data/x509/ca_cert.pem @@ -1,34 +1,34 @@ -----BEGIN CERTIFICATE----- -MIIF6jCCA9KgAwIBAgIJAKnJpgBC9CHNMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV +MIIF6jCCA9KgAwIBAgIJANQvyb7tgLDkMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD -MRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTAeFw0yMDA4MDQwMTU5NTdaFw0zMDA4 -MDIwMTU5NTdaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD +MRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTAeFw0yMjAzMTgyMTQ0NTZaFw0zMjAz +MTUyMTQ0NTZaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMZFKSUi+PlQ6z/aTz1Jp9lqrFAY -38cEIzpxS9ktQiWvLoYICImXRFhCH/h+WjmiyV8zYHcbft63BTUwgXJFuE0cxsJY -mqOUYL2wTD5PzgoN0B9KVgKyyi0SQ6WH9+D2ZvYAolHb1l6pYuxxk1bQL2OA80Cc -K659UioynIQtJ52NRqGRDI2EYsC9XRuhfddnDu/RwBaiv3ix84R3VAqcgRyOeGwH -cX2e+aX0m6ULnsiyPXG9y9wQi956CGGZimInV63S+sU3Mc6PuUt8rwFlmSXCZ/07 -D8No5ljNUo6Vt2BpAMQzSz+SU4PUFE7Vxbq4ypI+2ZbkI80YjDwF52/pMauqZFIP -Kjw0b2yyWD/F4hLmR7Rx9d8EFWRLZm2VYSVMiQTwANpb+uL7+kH8UE3QF7tryH8K -G65mMh18XiERgSAWgs5Z8j/B1W5bl17PVx2Ii1dYp0IquyAVjCIKRrFituvoXXZj -FHHpb/aUDpW0SYrT5dmDhAAGFkYfMTFd4EOj6bWepZtRRjPeIHR9B2yx8U0tFSMf -tuHCj95l2izJDUfKhVIkigpbRrElI2QqXAPIyIOqcdzlgtI6DIanCd/CwsfdyaEs -7AnW2mFWarbkxpw92RdGxYy6WXbdM+2EdY+cWKys06upINcnG2zvkCflAE39fg9F -BVCJC71oO3laXnf7AgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E 
-FgQUBuToaw2a+AV/vfbooJn3yzwA3lMwgYAGA1UdIwR5MHeAFAbk6GsNmvgFf732 -6KCZ98s8AN5ToVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANGmhBQQ5f3n4UhgJLsXHh3CE3ej +Ox36ob+Hnny9Gb/OquA4FMKjTTaSrhKIQapqlCLODai50XKSRBJcgsvsqWk9UdL2 +3zf7CzAPmg5CmzpWWwgpKPTuK5W+gLA1+uMKecBdH5gqSswQ3TD1fMfnJuq9mNfC +GsMkplaqS5VATNFPVnqS7us3OXKEITmBaQP4wOpGP1PgqX7K08aZEeAyQJaTS5um +4MNlBLYa/nQ9Wca0Uk5tzoNjE6mWH7bTuwdoZgOIwKFmBbmsC9y/HzwV/zRsL8Yp ++7FwfIYuZ5j8gBNqSFQjDFkm6Q7RcQ/lyHHj9YduOgTciIFVgx+j8aZvFqH127h8 +WIb7Jppy0DEDJE1hRP6iV2uVoaUxhXWrCWLBUU+naLix7SJ8rqw8gHwRNWfM/Lwg +I3rGXdw5WIHVQcuxevN6qVSZeWVYAlAgfxjKtM5cKZyM+W80CSdVKEku1XA0sq6h +jaiJdo6hpm8BLIB2k7LWafc5MASst7XULk4uDC/OYcEz3+C3Ryn1qBltr1gA3+5K +ANuhjYCZH4P0pX08I1MpeVP6h8XhbBPEZg2txbVGlnDXEFoJN9Eg5iEKRBo/HKhf +lP84ljtBSmCnsF6K/y3vnRiu+BVNP5KMq179DNqEy7tSygzgY41m3pSFojdvA59N +JWJoy9/NZzdlU4nzAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUW5AMXXg/zPSaLHwSO/7LwoBeZYUwgYAGA1UdIwR5MHeAFFuQDF14P8z0mix8 +Ejv+y8KAXmWFoVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1zZXJ2ZXJfY2GC -CQCpyaYAQvQhzTAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBALUz -P2SiZAXZDwCH8kzHbLqsqacSM81bUSuG153t3fhwZU8hzXgQqifFububLkrLaRCj -VvtIS3XsbHmKYD1TBOOCZy5zE2KdpWYW47LmogBqUllKCSD099UHFB2YUepK9Zci -oxYJMhNWIhkoJ/NJMp70A8PZtxUvZafeUQl6xueo1yPbfQubg0lG9Pp2xkmTypSv -WJkpRyX8GSJYFoFFYdNcvICVw7E/Zg+PGXe8gjpAGWW8KxxaohPsdLid6f3KauJM -UCi/WQECzIpNzxQDSqnGeoqbZp+2y6mhgECQ3mG/K75n0fX0aV88DNwTd1o0xOpv -lHJo8VD9mvwnapbm/Bc7NWIzCjL8fo0IviRkmAuoz525eBy6NsUCf1f432auvNbg -OUaGGrY6Kse9sF8Tsc8XMoT9AfGQaR8Ay7oJHjaCZccvuxpB2n//L1UAjMRPYd2y -XAiSN2xz7WauUh4+v48lKbWa+dwn1G0pa6ZGB7IGBUbgva8Fi3iqVh3UZoz+0PFM -qVLG2SzhfMTMHg0kF+rI4eOcEKc1j3A83DmTTPZDz3APn53weJLJhKzrgQiI1JRW -boAJ4VFQF6zjxeecCIIiekH6saYKnol2yL6ksm0jyHoFejkrHWrzoRAwIhTf9avj -G7QS5fiSQk4PXCX42J5aS/zISy85RT120bkBjV/P +CQDUL8m+7YCw5DAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAKTh 
+Ofg4WospSN7Gg/q3bQqfSMT5XTFC7cj0j3cWDZBnmqb0HAFPmzHT+w3kBVNCyx1r +iatOhaZRH7RA0vacZQT5pD2MGU48/zFfwBV/qHENQWuRLD2WOOEU3cjjoINBclfP +im7ml/xgz0ACOgUyf+/2hkS7VLq4p9QQVGf2TQt65DZA9mUylZTdsBf4AfEg7IXv +gaYpq6tYmNi7fXDzR/LT+fPd4ejQARy9U7uVhecyH9zTUMzm2Fr/p7HhydSXNwhF +JUfPWw7XYO0lyA+8PxUSAKXOfsT44WNtHAeRm/Gkmn8inBdedFia/+M67k45b/wY +RF11QzvaMR33jmrdZWxCc0Xjg8oZIP7T9MfGFULEGCpB3NY4YjnRrid/JZ/edhPR +2iOiEiek4qAaxeIne3CR2dqCM+n+FV1zCs4n3S0os4+kknnS5aNR5wZpqpZfG0Co +FyWE+dE51cGcub1wT1oi5Xrxg/iRteCfd33Ky668FYKA/tHHdqkVfBflATU6iOtw +dIzvFJk1H1mUwpJrH/aNOHzVCQ5KSpcc+kXcOQPafTHFB6zMVJ6O+Vm7SrqiSENM +2b1fBKxHIsxOtwrKuzbRhU5+eAICqwMd6gcIpT/JSR1r+UfHVcrXalbeazmT2DS5 +CFOeinj4WQvtPYOdbYsWg8Y9zGN4L9zH6GovM1wD -----END CERTIFICATE----- diff --git a/examples/data/x509/ca_key.pem b/examples/data/x509/ca_key.pem new file mode 100644 index 000000000000..4dccea1be392 --- /dev/null +++ b/examples/data/x509/ca_key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDRpoQUEOX95+FI +YCS7Fx4dwhN3ozsd+qG/h558vRm/zqrgOBTCo002kq4SiEGqapQizg2oudFykkQS +XILL7KlpPVHS9t83+wswD5oOQps6VlsIKSj07iuVvoCwNfrjCnnAXR+YKkrMEN0w +9XzH5ybqvZjXwhrDJKZWqkuVQEzRT1Z6ku7rNzlyhCE5gWkD+MDqRj9T4Kl+ytPG +mRHgMkCWk0ubpuDDZQS2Gv50PVnGtFJObc6DYxOplh+207sHaGYDiMChZgW5rAvc +vx88Ff80bC/GKfuxcHyGLmeY/IATakhUIwxZJukO0XEP5chx4/WHbjoE3IiBVYMf +o/Gmbxah9du4fFiG+yaactAxAyRNYUT+oldrlaGlMYV1qwliwVFPp2i4se0ifK6s +PIB8ETVnzPy8ICN6xl3cOViB1UHLsXrzeqlUmXllWAJQIH8YyrTOXCmcjPlvNAkn +VShJLtVwNLKuoY2oiXaOoaZvASyAdpOy1mn3OTAErLe11C5OLgwvzmHBM9/gt0cp +9agZba9YAN/uSgDboY2AmR+D9KV9PCNTKXlT+ofF4WwTxGYNrcW1RpZw1xBaCTfR +IOYhCkQaPxyoX5T/OJY7QUpgp7Beiv8t750YrvgVTT+SjKte/QzahMu7UsoM4GON +Zt6UhaI3bwOfTSViaMvfzWc3ZVOJ8wIDAQABAoICAQCxi7A9AhaUUWRzE6DnpGtH +zk0IO39cIx4KAsNQZiDBVDdXzYafUwaX2d57KVNbDAlJ9HCS3FKpEX9+gUPviQvr +aRe7boCZewv9dqkDvJqS7AEJxzm9O1pD5WI8WGqRDhUPuI2CIwbXDM0VokA7VuGZ +WFlxFxvs+UO5D10VF7A2blcRVQ/quQj4lzc/6P1TdL2DaVxGH3PLQd/ZR1ZhJI2Y 
+N0OHnOqp7wnvYqrtK+u0oI83hjym/ifvrYhMH8E7Q8lo4s4noSvmEvK0zlKYYxSO +g7RtwK47lcSPKgtn/yZDyvVX85qIgbBLcUmrqfB3qxMKz2lpJo6f4Rg7mm6SgW+K +zxYnGNCTPfiyPKiufM3rQPfJ4giqQ1XDKiZEKUJBo4mzzV6LcAoDaEqhHBlySpi3 +Z38I0rmAT62PRJ1sMkQl6j1Ben9TpwTzJmLX1sEO1Jsabsk8rRdV+ni5oRRUdW4H ++ratyQ8pmegLYyhAZqkD7FzKBLdznLmWXVTcBQkRoD5lQkCP2OF78TdL4twNvoTH +X4kQ3cNysWFXsm+yf4jSCHl4BEtGA2jOU690T0trtMf13aI3wEULmcBgc2ix+tch +wX79hwBYcjGGDfTMb39r/DrcgWMVFXawru78QFoN9vVxznit9LrOERBm6zN2ok4X +E1kD4YZGr8dxUHax0or4CQKCAQEA7W1Sxeqc0gV0ANQf3eCsFNjvT97z/RSzzUYF +wCe4rpzQ9ZNsY2UYMYmEzUuRBuQxYKCNTWot3hu+6OPMCp4pLuu2l8ha/wCM2TkY +6hceduvXkdUNUG1xZNSR8waw4PTXNeoOD30+GB4OpHdjzsF5pEzx853/Qo/ERJFx +A+aZZJy/Sfw82KTseYTniWYjH4iYUbC8TVLfRjPw6V2VcF78pYkdAQenGglqw/sI +4a3FhJspN9xV/PoPbb7PjBJFHUt7ZRQt+D3WPuhLSjyPxwV+3u2OsQ1/J/sxcih6 +rW2g+OJYrK4YkOqX9tLRB39RjO4H6Eiv5eUAw/+vHHufKRu1HwKCAQEA4gzxZNzm +r1X/5GAwwyBJ4eQUHFvEQsC2L4GTJnNNAvmJzSIWnmxGfFLhfJSabnlCMYelMhKS +Ntxokk5ItOhxlUbA1CucEtQgehJwREpUljlk7cii5MLZEkz11QxIVoAhGlq3svFG +B/gwYWNVWl2CXcK2o6BBD9sIgzgp7qhmdJej16h8YkWn7HibKs+OBcdCu+ri7wU+ +VdLpdhN3uqo1b1tO58Gv+40vuQE3ZKDdMy55V30+0qEqg6dXvDQ9nwYFkw6C31Ad +Wpa9ZB0A0HNSou1xTWyl/hDie6dlN84RHGX8on4sjgPrb8A8WVis+R2abvh9ApZA +fRZ3H/ZYXB1crQKCAQBgjgEHc+3qi0UtwRZkiSXyJHbOKIFY/r5QUJWuG3lDqYph +FF8T3N0F6EMVqhGEl/Bst14/iVq15Nqyo1ErUD63UiyjdVtsMLEW9d1n9ZbyDd9Q +8y/C8X8X3kqsZqAwG+IZjuHA8tH5xN93iwYP4yaw5onO5QYV75mFuRAY4gKnpAc2 +81lbUVbJ5H60pdDK1iX7ssAhQf6C8kSa4vAPDtH4D9a3wID4WbQNl115Sc31q5QL +n5NomdkEbIDDGfr5euTnqlk3hw5F7voPaqmd6mI6Dqnk3vRDMihdoJCjTt4T2Rju +wK5E4OKEAh/3yJNFmNemY0kFWSgCjUyNbMjBUv9JAoIBAQCYS9QO+m1JUA2ZVd1E +eWqNkFakTIdL2f5kv03ep+wIxwq6c+79SUGr3UMh5hStvXCFYjhAJhbwc0rY13lQ +uRJdWk/sIn2CifxfgjC1MccPdxeyxGxK56PMGqG9qgrKjITA9sGxA7EFCYe+9We5 +/Coq9VaLoxpyjkWL8rj9m+N7RfcTAubaZseeIBuamj+7UOZ7KOM/2i6HMBQugys1 +Thu2LLRanDnups6yPEmPuHmPVA5YjX9X9VFpZcNMf33MuAflbe9qeNVuBQUQgCHe +TvQr5QFjAoJLTCDq4nrlQCZzFZtB9vQZsjZbEg8WuxG+vN0hSrUemxBTtmEH3bbm +SLn5AoIBABGxznQFXXlF3eLIZqLvItDMSTpFp8YPk8GQWPT2V3pNNjvK/j7eg+tn 
+VouXv5LjyLTzWLKnPjIU4t+qwu6R9nohZ62OjGl6lssVdjPnf4R6UKzRa0iIZtH4 +BlGncnAbzb6TJuLX7dNwICoUCGyvk9tdnThH1FY3ZAEhOi1G8LEh7aBrj9/vUZ2d +S5jzZ7kLh04AB8OP1MXM3sZE7VlIxUtT/NLlwC8zRsg84pAjg3U7PygIDYQDzCRB +4yIvDziTPqDB/vdCKt7/Xary5Xj4NwqcPCRf6HvdHYCVeW7V+mWcMKZgodQARQhv +qQCK9iiN08MAFNia/0/Bj4D7XKurNRY= +-----END PRIVATE KEY----- diff --git a/examples/data/x509/client_ca_cert.pem b/examples/data/x509/client_ca_cert.pem new file mode 100644 index 000000000000..62a0ce0545a6 --- /dev/null +++ b/examples/data/x509/client_ca_cert.pem @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF6jCCA9KgAwIBAgIJAOhoXtjjP6JdMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV +BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD +MRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTAeFw0yMjAzMTgyMTQ0NThaFw0zMjAz +MTUyMTQ0NThaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD +U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAO7fTqeU+8OfKMwXABNF90+RYL4X +YS4ULx4rpf14Ntp1SF6o3itCSM3jJfHzexj2Pm16aL+OQll8ODtvTadqVSMndMCn +UN/jVjxiMmjkSNKpwUGG69CsQzCKoueKBCEy/CZSopQae6Wxn7mqTAzhFlh3idNL +J+12UtdqDxnPDsiG2XBET3UrKyJeBxMgRyPi/g4wHfhH9oJ97jkdacUlLko8l22s +ZiMSSwwOlWxtTY5t0FbHu08ufP4eYTqC0LL3z1Fon4v+4BqUyK7BT3dISwPBmSd1 +uTD7Wbaa/QmfU6Y18dkNlK00GUAcKWgPfLcm7EH/AAz5XkqozVR3z5FLBYFTxVrA +Ly/Gu5HLx/uwoYWeYRWBOSkqvdgf9PT57imO4fOi1CTQuq/1LAdaxGkm7yXaz0YP +ySTiT6PvcLWFEbjrbufxdBrF4/ZsQz5vdJiKq2IQmCIKONJOFHWqgoF4AA7Ze1cl +mrK0eLzUlG1WmSy5mpjByRanahQWYvK1s0tc8IwMRRJY4DS6Dp99EVyteKZP/jc0 +x+ILet2ThDhjY3AxtkzlejyylABgl2AyGoGzZzbaf1q/0LfM6SfYBSVZK3TFR3Kt +8lQnG0tztoM+bnM/JZ8UZ61s16jJVxWzlZ+rx8rCpIvh3Cnl52DGo6oA4Kt60uDP +3iiTLGNYqEyHmzgnAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQUdOqNqaSjcn7BRN3fLs4eTIp1W9MwgYAGA1UdIwR5MHeAFHTqjamko3J+wUTd +3y7OHkyKdVvToVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV +BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1jbGllbnRfY2GC +CQDoaF7Y4z+iXTAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAOnH 
+CrwiJd51oBic5PwjQBhQcUtGOfR1BJe/PACpLXTf1Fbo8bLT5GxZLATlw9+EVO9P +JhhH+oiUuvA7dE2SRiZXpY7faqtDgvVfssyCrvACkM7pcP9A5kM4LiunX7dpY2xp +naJAqDV5Av1mOohHuVEZHqV6xQSREQFW2IusfpCsPP+P+RPKM2o571e6oz5RGbuP +dQ39QycBTK8ezccxaDaH614peAnBi4Q1GuxzgNmXq2FPDcf7F1QcWMrW3jUI8npi +Q9rXRwrqUYP7Yzz+dIziGdpOfZd7x/MyCXuqRdFdA+bulGM2Es5lvtguPOFhcWp0 +3hzLJ+yolxyqxnNNdaU0r+TDbgxOBjw0VxahuhzFDeZsP6Civzp+Y6MRdvofNXBm +IBD4uqmQtUUyE2uoznXvZkXaSc+0VIGgs04AMS9irBC2oVEGDp0AbelcIhdgToam +/NTuOmxgadwDuEn3TIFYkzx84J81kL8g0HQ1N09nSXChkSVb+XlxC+Wosxoazydr +M4FOvaa1V4vnmIdA2aF1nWTzJNcc9FC23zTmQkV2YJ1IKNmxGd3xBZzUtUBu5OgZ +vPXECtUjRcraNuXeL6gSX0qBaaVkcdxhp8CpI8k6Qb+mgOaq/ixrVEKtczBVXjHD +pO6QmwMZtqR8JsStbMCYXa2owt4k8F3yMlIKE6qX +-----END CERTIFICATE----- diff --git a/examples/data/x509/client_ca_key.pem b/examples/data/x509/client_ca_key.pem new file mode 100644 index 000000000000..77065d5cc8a9 --- /dev/null +++ b/examples/data/x509/client_ca_key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDu306nlPvDnyjM +FwATRfdPkWC+F2EuFC8eK6X9eDbadUheqN4rQkjN4yXx83sY9j5temi/jkJZfDg7 +b02nalUjJ3TAp1Df41Y8YjJo5EjSqcFBhuvQrEMwiqLnigQhMvwmUqKUGnulsZ+5 +qkwM4RZYd4nTSyftdlLXag8Zzw7IhtlwRE91KysiXgcTIEcj4v4OMB34R/aCfe45 +HWnFJS5KPJdtrGYjEksMDpVsbU2ObdBWx7tPLnz+HmE6gtCy989RaJ+L/uAalMiu +wU93SEsDwZkndbkw+1m2mv0Jn1OmNfHZDZStNBlAHCloD3y3JuxB/wAM+V5KqM1U +d8+RSwWBU8VawC8vxruRy8f7sKGFnmEVgTkpKr3YH/T0+e4pjuHzotQk0Lqv9SwH +WsRpJu8l2s9GD8kk4k+j73C1hRG4627n8XQaxeP2bEM+b3SYiqtiEJgiCjjSThR1 +qoKBeAAO2XtXJZqytHi81JRtVpksuZqYwckWp2oUFmLytbNLXPCMDEUSWOA0ug6f +fRFcrXimT/43NMfiC3rdk4Q4Y2NwMbZM5Xo8spQAYJdgMhqBs2c22n9av9C3zOkn +2AUlWSt0xUdyrfJUJxtLc7aDPm5zPyWfFGetbNeoyVcVs5Wfq8fKwqSL4dwp5edg +xqOqAOCretLgz94okyxjWKhMh5s4JwIDAQABAoICAAmMq9xPPHFpn3vpP3uFxIlN +yoxO6veonumZ3Rzw/WBmZ+pA3gDkuXxhpFaz4SvyTDScPCvMSCLDsIvPu08CFT0+ +ipBZIAaTVBM96b3/wlmJp8wy1KKXAGikYjbXcarSGvp9OzqohGDvZO9LO5cYOIh4 +3u2vh30ayd0KxGfHu1OQ8IhocrTAcQ0CrU26cJ2iqX1vtwMB/XziA/AMmPnkrqER 
+IwyjY8HrLUziGF8pT3xuL3IIshhMR3rxQ/nO2QEOnx8mC5rRKaxmXk9+MusV3Mnd +p33IWwr2QXPnZk5ILFPsvCptPJBgENJbTdx3IglAaRmKVDowjfB2Jx9FWur4ENQy ++yCzf0ygRoXnugtwE48/L7P8mlqZlZsxQbUUjXEPtht8rtM4CR5b0v7PHXiLh1oM +igfy1RDAQAZQRGIlWCOeV2soiyKLnCGyAaVXcM2ksDkYOSH4ObE4KwF1Ph87lNaG +ywolsPvQD0ygymXcuStrYHWamTp8qRjNvZBcThs3SaKN+lxXxPng2tBPUwU0S6nj +e0pjWco74elBk+fjjd0wNolKjUD7FhRXlWiXz9BgcCjRD9TLoVk8mp9cFL7OLzJc +735JmNKP8C5Qs91Ugo6Z9tWQQTdGHZe9ElUY0fWP0bs+4iBaadl63R26tchLncZE +LnYsi2AjDdV908cEkAiBAoIBAQD6LbGeyFHZA42nuSw/NFsMVldqU6QwmADQI3Tw +JEdw2thS8VIX2c8aeJkVL++dNmSPcqs4NqhzgJSm9o1xNqGZovAPK/B3NmLl1kzG +JPwSr8QwNxmKwUlbt1K48qIV0JmetOgRG/ll5ux2CxgWHzwgRwtvpbnxDa7Gf7BA +UfH7AfZJ3iV+HlJSxr9XxNgFoNEtpP9sqbOgt10f5JJlIELCTa38iMBojAGxlzyj +7DGYY/diQDr+6mRNnv2pY57dOnmdvN1w+p1W7saaeRCeltva/G+5n5AWMFl5qBjT +LDktBE+okH5wapkUsZzZTByTgFXdBC2wY2qBrOexBAyS8/F3AoIBAQD0bkNBc1ya +KYmWlCsVSUZxUGSOp9g7ZdzlB/1G523s3PltXSphsC4mACs7ZAs5OAO/bu05kurp +dOqEAxsC05IxD2/gGoarC6QfTum9CMNoKrvtczA7Gl+6D5djum17lULY6YSBO75J +L0FQK6nCVGfAbBRAqhiFi+9kXvNThuqjgoiCNwQYxaG8aovoAKTFdkzQjDw2tUgM +jqCM6ifOBJIRolFq2CBom8nB+wpsI1naFLaOdg0Luz/Ds03gD9nWa6a4XIowKCml +Tek1Q+S2hZoTgfOlKRbCcM1KyoaI9LKI/pbKmpNyyrADw/kZKevfsKnYwMpHlaTR +NSuQ2VJKuxrRAoIBAQCBQ3bQ+eQAYyugC7dm+OBKYZpNH+ZoDUHuSUO0iKo5D3pS +cMnf9PRjUwiVv+zoqCARVkhNhUBIXZlxI1c1teqNfXjX/fYDQqCa7L1Ca/2qkhKm +bvHNlc0XjIM7eHJzHxMgw4xcur2D/2sSGu1ZEM56RvsLtu96M32opnUk5rJG5V6i +EBwDLBuRFYvsB5MuZUdvdB9dv9lGIzgEsI9LnP2hc42APBBedGizn9b/Q5zkhlJd ++53/9I/a41lhWk3NNNd9vwYTyAnfzwPi8Ma7imsSnPgFSwKh1F2G1GnvQpxQPDgE +epQ59XofDR5j0EW7mMXEqtIIn3V6hyI3fkYY795FAoIBAQCsx7x26YsN1krRzA7g +TxmiQ8exJ2gsJIcOxqT8l98WTeVqry6kOxuD9R6aLs/YNIZBrbG2vuma+PBFPMS9 +LLzsPRNCAL4s7l+nWerTmvw2B+8rm/796Fi+dwL2lfOKJipIllj52TdbGDI874Bi +Q7PLSxrN0u7eh9pCwvORmY8G4eCI20bkE9+OBmq7JqlSg5ss19RAf8hcR/2pXmOg +t45hNLIEqp3OFEF8A26MnjiHdZjN/xidsFEUjwx/U/USIqqJK7Dq9ZjqprYw1rs3 +Yh1VqMiHeRIDhCU5twt+iCojuILy2G1d+XSOVNsiNIXtaz3EYBMcouUMlV8kVtpa +xQPhAoIBAEr8U7ZaAxN2Ptgb6B8M1CVNE6q7S1VuX+T8xkciadW2aRjJ3PufFfsk 
+Zo12fP9K/NeOPTIz0dQB6Gy/CKzDLb8NnJCJnCUUaO8E45C2L9r6qvIJpXWHp3vo +neGO49y/5st7suOZkWU2B6ZGwNWH90296mfSKcUNxSRMaHCotPdVDyvOgLC24ZWR +6teRaxB2sVZYqmoz+4+G8SOK40bHJKf1kwujbrS3OqzDzEeC/STtqYZWPW03MFkk +MBPQvwCWMJINv4zz4YrnOaA9COc1/fTXCG5kKYyalPD8VKxi1usas1pZwIqZkuwm +D6kBMuZ4gkKW24IYzXzOni0/BOnpOfM= +-----END PRIVATE KEY----- diff --git a/examples/data/x509/client_cert.pem b/examples/data/x509/client_cert.pem new file mode 100644 index 000000000000..e35b94b1f27a --- /dev/null +++ b/examples/data/x509/client_cert.pem @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx +CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIyMDMxODIxNDQ1OVoXDTMyMDMxNTIxNDQ1 +OVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAL2ec6a93OYIioefCs3KRz752E5VfJPyVuxalBMc +7Dx84NsdwpbUyDT6fO7ePYM8IvYAsLc5coLCP1HKGGRmYm423WZf8Kn93BDl0XcN +4bgtW9ZrekvYcXqSzygz3ifdQeZljZrqW43dkkYR2vWc+uJXs+vrRVZyUSLLbe97 +9zUbWbOfHBc1jK1vTUakl08VhllYbO0m0SYZIni0sioItVdVWTz9XE2COavLqwwL +MIq8N7JXEdYJC49JWfdzvqZYTxOn5FSTCWen7/mcZmuLYPwUCkSu05M5T2o1ygkd +ohA+/X9yjToPJ7NO509lKHWo7+sp9if6jZsiOU45/t84pD6juVZSZ20/A9i6hjtj +C0SqYk2iQEtRp+lT6yYa5ffeNllFUGtM+xq2are2n93PnXwMTUlYGuTtkyRPG717 +ZtQjKQuwfdJNoNbJl2cfQpmtLdm4Jzrg5cWiiFro+aqnZxIfUEEDkIBaUjYmwMkS +Qq+S32L4f4u7rtbnzdo/jVwq0wpSjTGQJEab+v2wZpDhVbQblTyI30A+TvBIzLil +09OX49/teZCp05kOJy0V/yXdQtPwlQGXdsCUmD6dnGav17fB1witXDdG+4SNoyF/ +PN+8wtlMQ8fWvLdxLsd/Rq6CEZQV9mBhrQxXUmFFDhd0O6wfxR/lVFxIWg70Fz7P ++z7tAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFG0psrHrGny8ziVm +RtulG3f9ROrhMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD +AjANBgkqhkiG9w0BAQsFAAOCAgEAtr1dzSQswIOlEGlLtoAwkL7ys/gP2fcdh7Jl +ggiPs266yzZFyGGdd2GKo6tcjdBNjfnO8T5h8eLzj7QlzKPqA/l0BgAW7s7WX9QF +wCivw1DHE815ujlQNo3yve38pd2/I0hdf9GtQLGyOirYpwW5YcHvpmLezrW6J3UU 
+CWIfYhqO6bSs+HCLkvQdsCG1TpveWYXfC9aXHjw+ZGOjBMEt6AgdWctwzTjQfZub +VjZosBC3ZkDjkA9LTqKP5f8XSWt89J4JCYkiFRiJuYYiNYcZpb0Ug93XjEHIHXMG +N/cD9fCB2HovoVu8YnezpSrqEhqEikHSq80fwbf+NaT0CEbPMx3UMzt8d8gwUiwE +nzzf/o4uOwoofNWfka0J1VPY1AtjUDvz44LyVhp4uvkEJEK1WQ46mM68H/EOUmpd +fHANEbV8HLq2iOjR78n5+MCHRcX7duScp5wT0ajfDg41VrhvV/u7YctFj8ynQJg5 +cqbH+GgTrEfAFFm5mZH1SGqNPyxr1eQFWXMRGE7R/NoyQo2uqrSRmz6JFXlnWtxF +YmLhnOdQaytcpiYN2YVyC/rLK3l3Tbh4u5axvlZP/hi+nQluiZzkH97iUqXcBU/9 +jYNohnJzXMHTIZM8FQY+9uGw9ErdDo7FmX5Xkp4TzEz9k10m1fnt0njSEzITtqpg +MoO9n00= +-----END CERTIFICATE----- diff --git a/examples/data/x509/client_key.pem b/examples/data/x509/client_key.pem new file mode 100644 index 000000000000..d9c4bae3bbb5 --- /dev/null +++ b/examples/data/x509/client_key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAvZ5zpr3c5giKh58KzcpHPvnYTlV8k/JW7FqUExzsPHzg2x3C +ltTINPp87t49gzwi9gCwtzlygsI/UcoYZGZibjbdZl/wqf3cEOXRdw3huC1b1mt6 +S9hxepLPKDPeJ91B5mWNmupbjd2SRhHa9Zz64lez6+tFVnJRIstt73v3NRtZs58c +FzWMrW9NRqSXTxWGWVhs7SbRJhkieLSyKgi1V1VZPP1cTYI5q8urDAswirw3slcR +1gkLj0lZ93O+plhPE6fkVJMJZ6fv+Zxma4tg/BQKRK7TkzlPajXKCR2iED79f3KN +Og8ns07nT2Uodajv6yn2J/qNmyI5Tjn+3zikPqO5VlJnbT8D2LqGO2MLRKpiTaJA +S1Gn6VPrJhrl9942WUVQa0z7GrZqt7af3c+dfAxNSVga5O2TJE8bvXtm1CMpC7B9 +0k2g1smXZx9Cma0t2bgnOuDlxaKIWuj5qqdnEh9QQQOQgFpSNibAyRJCr5LfYvh/ +i7uu1ufN2j+NXCrTClKNMZAkRpv6/bBmkOFVtBuVPIjfQD5O8EjMuKXT05fj3+15 +kKnTmQ4nLRX/Jd1C0/CVAZd2wJSYPp2cZq/Xt8HXCK1cN0b7hI2jIX8837zC2UxD +x9a8t3Eux39GroIRlBX2YGGtDFdSYUUOF3Q7rB/FH+VUXEhaDvQXPs/7Pu0CAwEA +AQKCAgAtlwQ9adbLo/ASrYV+dwzsMkv0gY9DTvfhOeHyOnj+DhRN+njHpP9B5ZvW +Hq7xd6r8NKxIUVKb57Irqwh0Uz2FPEG9FIIbjQK1OVxEYJ0NmDJFem/b/n1CODwA +cYAPW541k+MZBRHgKQ67NB3OAeE8PFPw/A8euruRPxH+i3KjXSETE8VAO0rIhEMz +Ie2TQRydLKp71mJg45grJ17Sxmc7STT8efoQVKgjCwPkEGiqYpiNk2uhZ2lVGRC9 +cyG6gu74TdyTDQss1e7Xt+fUIZ2+3d6eJt6NvjC+25Ho4SwO9eYjF1qnQ++KqATr +TOoOaADPLLaXZCFZ1D+s9Dq4Vrj+QGk8Fajotj4gBpUtc0JxtvYM9EhlW7DpchYm +Cxe8vmEi/54YErXKawTUXYBB8IeDzwtvi3v3ktmH8BsGJ6Y3RXDI9KIG/6IE5Xeu 
+hkPCJnB0e3G2nlaffNSrVknxF+z74DB3T2kj0zC/4H4/hHo4W5D/pswcGWlhREWG +E7ViXJjBRkc5tpS9HfNdZ2wHiccioDIdGSHGqGMF4rLCUE2n+zc4m6pvvNCjN5KB +S4+zps50Gqtbp3DH2h1YLtkzuzvDhgpMPyJ1qZsdgelRSi2IaE5oekuBGP2WeXFw +DLI/cijc13cCacH+kpllQL//zBP8mMGmussWGgrVXdm9ZqD+rQKCAQEA6OG+s8sa +QZJ8W1nukcaS5rSvJBeZO6neCd6EB4oew5UGJsSz+x4RtJ7aJhdTGtyCXqiR2uFw +SBYdTcOgNbBUXg39vWAv+k2lmxiMGuLnAcNcGYyDLXr1SUJwe4Be984WNFdqzY0z +LCd9NvutWWX0Xd1VBdhlDuu3eBenzPBKIxTk3N2gLvzYxC/62e29Trsm7Sur11ut +Jay/CRdomjaqIiZ8q8qgdSU+pPe2DZYzUOutySJhLUegrrgWvPS/i8FHf7AGRgki +wpFn3gy5zCsFzr6n/TzJ5zQvlz+PcbUHHb06U1cnT45fkFNAJJvBYa4vi/tRx92E +Bi8d4bn40fUo3wKCAQEA0HFDHzhRxN/RbzBkymGlgfrsKcBdaAzgClo5uAXr8sdi +efsgBFo228I5lK6ywfzOfD/UxGB6ucdkZb/tRLtoK0OqOGiNx2Q1yazRVbuhrBrR +Y7DDbh7164o/MAYqPGxTMUxzXia7WBtNm00Tv9pDsw+NTzbrk7OxkLZWbjQEj99T +A9pcqXYA1RJtD/6io/43/oVscWPdRrbrNrJz+27Bsau20MBheVmX5sLTO2iWKTN4 +/ofrvOv0ru0I3ACHiLIaQFXs4snQjlhJm5MJ6kuZVdYKAzyNE+YOPnAxoiQAlHau +E1aV8ON7jmjhwxa2QICCwVcUNmwXU4UztGyGZ5a1swKCAQAi90Ia3LPkhIoHbUlU +uev0l8x0LtbjDm44LSDFwQc9dnKl/4LGgY1HAVLfxUDFF7a7X7QGmTKyoB9mPakg +ZolEVfVzKa4Kdv4We2kN4GOu8BYz/9TyTzPk/ATHhk68BkVvNnDizACS8JrsVn2A +nr5CGalaZ1NFGj9B2MtpCesXuVtjjiMu6ufhDRMtBXUXDSKbGaODglBNB9LnGoyq +GusQlZbCdHoDHMR7IHZFM/ggfkJpoK/WjJqjoSBI3raj1TFXCqbmfRiq/goKXP7I +mO0WTaoLa8Uk4cEDhJeVCwk2feL0AHH2j/npQZav6HLwp6ab7fApgikAhLKH4dRq +MdUhAoIBAQC7svJVf7qqRT3sGTD5yXpnlJPreOzj0IxC5kKJgtOYuJDl9Qw8vxwd +QkXlrHcOFl++JSCsgZCiEHpI4c6AER5Zr0HuL8BUJ9oDtJqA0EhimXeqhLdHR5v9 +sWz7CuInrQgxIX3V75zOVy/IRF0fayWBbeS6y2LRi4O/I2KrNC5TfC/eDVlZxAg1 +1rTdLVg5wqebi3w+k0Xj8r3WcFXeuTq0ikNCsapUwyf1RcU+/wwRJ+exlKXkZrnc +d1h9/AAQSQk4m+eHxWIHfFs0O/E2yULXt7kmdvU3UPfMo+0d67uV9VUF1veIhuBx +OeLqcV5GsTKNdaOe6jELJayMsRlK2LzfAoIBAEoWFSUdf3ruvj+ONju0TDtdvvTb ++i+3ttqMK/duYM2TlD3Lvqyx3kNxlMTAArfvnwtKVSw0ZIGSPc/5KHnxldcdALgT +4Ub1YesUv5585thMw1EWyXAPognLhfTEVSLYKcMPoBNCv7FvAT3Mk5SZPReRkbT9 +oqDAzg7r+0+pjD9LmnIXfCxfbSV6zcBFF8/iGAmzh3CanDqVkUds1+Ia8018cfDS +KW5PQAEnJC/BZAI7SQsxH0J9M7NYxJRN0bua5Be0N+uuYSOa+d9yecugfmvga6jf 
+9nEcohJShacCSkQvIXlq5Uy/WBb6sbiTmHjjW14FG25B0rrQUjmFAUiYceI= +-----END RSA PRIVATE KEY----- diff --git a/examples/data/x509/create.sh b/examples/data/x509/create.sh new file mode 100755 index 000000000000..2b5aa5cffa07 --- /dev/null +++ b/examples/data/x509/create.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +# Create the server CA certs. +openssl req -x509 \ + -newkey rsa:4096 \ + -nodes \ + -days 3650 \ + -keyout ca_key.pem \ + -out ca_cert.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server_ca/ \ + -config ./openssl.cnf \ + -extensions test_ca \ + -sha256 + +# Create the client CA certs. +openssl req -x509 \ + -newkey rsa:4096 \ + -nodes \ + -days 3650 \ + -keyout client_ca_key.pem \ + -out client_ca_cert.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client_ca/ \ + -config ./openssl.cnf \ + -extensions test_ca \ + -sha256 + +# Generate a server cert. +openssl genrsa -out server_key.pem 4096 +openssl req -new \ + -key server_key.pem \ + -days 3650 \ + -out server_csr.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server1/ \ + -config ./openssl.cnf \ + -reqexts test_server +openssl x509 -req \ + -in server_csr.pem \ + -CAkey ca_key.pem \ + -CA ca_cert.pem \ + -days 3650 \ + -set_serial 1000 \ + -out server_cert.pem \ + -extfile ./openssl.cnf \ + -extensions test_server \ + -sha256 +openssl verify -verbose -CAfile ca_cert.pem server_cert.pem + +# Generate a client cert. 
+openssl genrsa -out client_key.pem 4096 +openssl req -new \ + -key client_key.pem \ + -days 3650 \ + -out client_csr.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client1/ \ + -config ./openssl.cnf \ + -reqexts test_client +openssl x509 -req \ + -in client_csr.pem \ + -CAkey client_ca_key.pem \ + -CA client_ca_cert.pem \ + -days 3650 \ + -set_serial 1000 \ + -out client_cert.pem \ + -extfile ./openssl.cnf \ + -extensions test_client \ + -sha256 +openssl verify -verbose -CAfile client_ca_cert.pem client_cert.pem + +rm *_csr.pem diff --git a/examples/data/x509/openssl.cnf b/examples/data/x509/openssl.cnf new file mode 100644 index 000000000000..d1034214e1d3 --- /dev/null +++ b/examples/data/x509/openssl.cnf @@ -0,0 +1,28 @@ +[req] +distinguished_name = req_distinguished_name +attributes = req_attributes + +[req_distinguished_name] + +[req_attributes] + +[test_ca] +basicConstraints = critical,CA:TRUE +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer:always +keyUsage = critical,keyCertSign + +[test_server] +basicConstraints = critical,CA:FALSE +subjectKeyIdentifier = hash +keyUsage = critical,digitalSignature,keyEncipherment,keyAgreement +subjectAltName = @server_alt_names + +[server_alt_names] +DNS.1 = *.test.example.com + +[test_client] +basicConstraints = critical,CA:FALSE +subjectKeyIdentifier = hash +keyUsage = critical,nonRepudiation,digitalSignature,keyEncipherment +extendedKeyUsage = critical,clientAuth diff --git a/examples/data/x509/server_cert.pem b/examples/data/x509/server_cert.pem index 3e48a52fd108..f1a374008342 100644 --- a/examples/data/x509/server_cert.pem +++ b/examples/data/x509/server_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- -MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwUDELMAkGA1UEBhMCVVMx +MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIwMDgwNDAxNTk1OFoXDTMwMDgwMjAxNTk1 
+BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIyMDMxODIxNDQ1OFoXDTMyMDMxNTIxNDQ1 OFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3Qtc2VydmVyMTCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBAKonkszKvSg1IUvpfW3PAeDPLgLrXboOWJCXv3RD -5q6vf29+IBCaljSJmU6T7SplokUML5ZkY6adjX6awG+LH3tOMg9zvXpHuSPRpFUk -2oLFtaWuzJ+NC5HIM0wWDvdZ6KQsiPFbNxk2Rhkk+QKsiiptZy2yf/AbDY0sVieZ -BJZJ+os+BdFIk7+XUgDutPdSAutTANhrGycYa4iYAfDGQApz3sndSSsM2KVc0w5F -gW6w2UBC4ggc1ZaWdbVtkYo+0dCsrl1J7WUNsz8v8mjGsvm9eFuJjKFBiDhCF+xg -4Xzu1Wz7zV97994la/xMImQR4QDdky9IgKcJMVUGua6U0GE5lmt2wnd3aAI228Vm -6SnK7kKvnD8vRUyM9ByeRoMlrAuYb0AjnVBr/MTFbOaii6w2v3RjU0j6YFzp8+67 -ihOW9nkb1ayqSXD3T4QUD0p75Ne7/zz1r2amIh9pmSJlugLexVDpb86vXg9RnXjb -Zn2HTEkXsL5eHUIlQzuhK+gdmj+MLGf/Yzp3fdaJsA0cJfMjj5Ubb2gR4VwzrHy9 -AD2Kjjzs06pTtpULChwpr9IBTLEsZfw/4uW4II4pfe6Rwn4bGHFifjx0+3svlsSo -jdHcXEMHvdRPhWGUZ0rne+IK6Qxgb3OMZu7a04vV0RqvgovxM6hre3e0UzBJG45Y -qlQjAgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFFL5HUzehgKNfgdz -4nuw5fru5OTPMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh -bXBsZS5jb20wDQYJKoZIhvcNAQEFBQADggIBAHMPYTF4StfSx9869EoitlEi7Oz2 -YTOForDbsY9i0VnIamhIi9CpjekAGLo8SVojeAk7UV3ayiu0hEMAHJWbicgWTwWM -JvZWWfrIk/2WYyBWWTa711DuW26cvtbSebFzXsovNeTqMICiTeYbvOAK826UdH/o -OqNiHL+UO5xR1Xmqa2hKmLSl5J1n+zgm94l6SROzc9c5YDzn03U+8dlhoyXCwlTv -JRprOD+lupccxcKj5Tfh9/G6PjKsgxW+DZ+rvQV5f/l7c4m/bBrgS8tru4t2Xip0 -NhQW4qHnL0wXdTjaOG/1liLppjcp7SsP+vKF4shUvp+P8NQuAswBp/QtqUse5EYl -EUARWrjEpV4OHSKThkMackMg5E32keiOvQE6iICxtU+m2V+C3xXM3G2cGlDDx5Ob -tan0c9fZXoygrN2mc94GPogfwFGxwivajvvJIs/bsB3RkcIuLbi2UB76Wwoq+ZvH -15xxNZI1rpaDhjEuqwbSGPMPVpFtF5VERgYQ9LaDgj7yorwSQ1YLY8R1y0vSiAR2 -2YeOaBH1ZLPF9v9os1iK4TIC8XQfPv7ll2WdDwfbe2ux5GVbDBD4bPhP9s3F4a+f -oPhikWsUY4eN5CfS76x6xL0L60TL1AlWLlwuubTxpvNhv3GSyxjfunjcGiXDml20 -6S80qO4hepxzzjol +AQEBBQADggIPADCCAgoCggIBAL5GBWw+qfXyelelYL/RDA/Fk4GA8DlcBQgBOjBa +XCVDMAJj63sN+ubKBtphWe6Y9SWLJa2mt8a/ZTQZm2R5FPSp9rwdr04UQgmL11wh +DCmO+wkRUeTYwsqcidEHRwOxoctyO+lwgYw983T/fp83qtNS4bw+1kJwrLtFdgok 
+Kd9UGIugs8BTFqE/7CxFRXTYsNy/gj0pp411Dtgknl1UefPdjco2Qon8f3Dm5iDf +AyUM1oL8+fnRQj/r6P3XC4AOiBsF3duxiBzUp87YgmwDOaa8paKOx2UNLA/eP/aP +Uhd7HkygqOX+tc3H8dvYONo6lhwQD1JqyG6IOOWe2uf5YXKK2TphPPRnCW4QIED4 +PuXYHjIvGYA4Kf0Wmb2hPk6bxJidNoLp9lsJyqGfk3QnT5PRJVgO0mlzo/UsZo77 +5j+yq87yLe5OL2HrZd1KTfg7SKOtMJ9N6tm2Hw2jwypKz+x2jlEZOgXHmYb5aUaI ++4xG+9fqc8x3ScoHQGNujF3qHO5SxnXkufNUSVbWbv1Ble8peiKyG6AFQvtcs7KG +pEoFztGSlaABwSvxO8J3aJPAEok4OI5IAGJNy92XaBMLtyt270FC8JtUnL+JEubV +t8tY5cCcGK7EtRHb47mM0K8HEq+IU2nAq6/29Ka0IZlkb5fPoWzQAZEIVKgLNHt4 +96g9AgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFNx36JXsCIzVWCOw +1ETtaxlN79XrMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh +bXBsZS5jb20wDQYJKoZIhvcNAQELBQADggIBAAEEZln7lsS/HIysNPJktc0Gdu3n +X1BcA3wXh95YTugcxSSeLLx2SykXnwX+cJncc1OKbboO9DA5mZ+huCesGIOKeUkg +azQZL6FAdw9PQKdqKg3RgSQ4XhK990fPcmmBhSXY24jNNhRHxGw5lGBrD6X2SdW3 +m66yYzn9hMXL4yrweGO7OC4bdyISDrJiP+St/xeCoIcXP2s07dE6jl2VorJCWn4J +SxKfDhPPohZKl6dL9npkmPcpz2zRAYpo4tsVdAAQDBRui44Vvm1eBPUo7EH2UOEh +/3JtTeDUpldM8fDaKE0kTa1Ttxzs2e0Jm3M4/FMOxqSesyJldw54F4+4m24e/iQU +gceArYMFVFTipgrLfUuRvRxx/7D7V92pqTyuD3T78+KdTqrlxvCTOqSHhFE05jWD +RdynS6Ev/1QZLlnWgMwhQAnjhc1NKkso+namF1ZmHH9owiTRBlWDMNcHMDReaELd +QmFUvutHUpjidt1z+G6lzbP0XB5w+0vW4BsT0FqaYsFbK5ftryj1/K0VctrSd/ke +GI0vxrErAyLG2B8bdK88u2w7DCuXjAOp+CeA7HUmk93TsPEAhrxQ6lR51IC6LcK0 +gACSdnQDPGtkoRX00DTvdcOpzmkSgaGr/mXTqp2lR9IuZIhwKbhS3lDKsAZ/hinB +yaBwLiXfcvZrZOwy -----END CERTIFICATE----- diff --git a/examples/data/x509/server_key.pem b/examples/data/x509/server_key.pem index e71ad0ac9753..1c778db7c491 100644 --- a/examples/data/x509/server_key.pem +++ b/examples/data/x509/server_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAqieSzMq9KDUhS+l9bc8B4M8uAutdug5YkJe/dEPmrq9/b34g -EJqWNImZTpPtKmWiRQwvlmRjpp2NfprAb4sfe04yD3O9eke5I9GkVSTagsW1pa7M -n40LkcgzTBYO91nopCyI8Vs3GTZGGST5AqyKKm1nLbJ/8BsNjSxWJ5kElkn6iz4F -0UiTv5dSAO6091IC61MA2GsbJxhriJgB8MZACnPeyd1JKwzYpVzTDkWBbrDZQELi 
-CBzVlpZ1tW2Rij7R0KyuXUntZQ2zPy/yaMay+b14W4mMoUGIOEIX7GDhfO7VbPvN -X3v33iVr/EwiZBHhAN2TL0iApwkxVQa5rpTQYTmWa3bCd3doAjbbxWbpKcruQq+c -Py9FTIz0HJ5GgyWsC5hvQCOdUGv8xMVs5qKLrDa/dGNTSPpgXOnz7ruKE5b2eRvV -rKpJcPdPhBQPSnvk17v/PPWvZqYiH2mZImW6At7FUOlvzq9eD1GdeNtmfYdMSRew -vl4dQiVDO6Er6B2aP4wsZ/9jOnd91omwDRwl8yOPlRtvaBHhXDOsfL0APYqOPOzT -qlO2lQsKHCmv0gFMsSxl/D/i5bggjil97pHCfhsYcWJ+PHT7ey+WxKiN0dxcQwe9 -1E+FYZRnSud74grpDGBvc4xm7trTi9XRGq+Ci/EzqGt7d7RTMEkbjliqVCMCAwEA -AQKCAgEAjU6UEVMFSBDnd/2OVtUlQCeOlIoWql8jmeEL9Gg3eTbx5AugYWmf+D2V -fbZHrX/+BM2b74+rWkFZspyd14R4PpSv6jk6UASkcmS1zqfud8/tjIzgDli6FPVn -9HYVM8IM+9qoV5hi56M1D8iuq1PS4m081Kx6p1IwLN93JSdksdL6KQz3E9jsKp5m -UbPrwcDv/7JM723zfMJA+40Rf32EzalwicAl9YSTnrC57g428VAY+88Pm6EmmAqX -8nXt+hs1b9EYdQziA5wfEgiljfIFzHVXMN3IVlrv35iz+XBzkqddw0ZSRkvTiz8U -sNAhd22JqIhapVfWz+FIgM43Ag9ABUMNWoQlaT0+2KlhkL+cZ6J1nfpMTBEIatz0 -A/l4TGcvdDhREODrS5jrxwJNx/LMRENtFFnRzAPzX4RdkFvi8SOioAWRBvs1TZFo -ZLq2bzDOzDjs+EPQVx0SmjZEiBRhI6nC8Way00IdQi3T546r6qTKfPmXgjl5/fVO -J4adGVbEUnI/7+fqL2N82WVr+Le585EFP/6IL5FO++sAIGDqAOzEQhyRaLhmnz+D -GboeS/Tac9XdymFbrEvEMB4EFS3nsZHTeahfiqVd/SuXFDTHZ6kiqXweuhfsP1uW -7tGlnqtn+3zmLO6XRENPVvmjn7DhU255yjiKFdUqkajcoOYyWPECggEBANuYk+sr -UTScvJoh/VRHuqd9NkVVIoqfoTN61x6V1OuNNcmjMWsOIsH+n4SifLlUW6xCKaSK -8x8RJYfE9bnObv/NqM4DMhuaNd52bPKFi8IBbHSZpuRE/UEyJhMDpoto04H1GXx4 -1S49tndiNxQOv1/VojB4BH7kapY0yp30drK1CrocGN+YOUddxI9lOQpgt2AyoXVk -ehdyamK4uzQmkMyyGQljrV5EQbmyPCqZ1l/d0MJ9DixOBxnPDR9Ov9qrG4Dy6S/k -cH8PythqHTGTdlXgsBJaWEl2PyQupo3OhfiCV+79B9uxPfKvk5CIMVbnYxKgu+ly -RKSTSX+GHVgNwicCggEBAMZcwQIAA+I39sTRg/Vn/MxmUBAu3h2+oJcuZ3FQh4v5 -SL80BWEsooK9Oe4MzxyWkU+8FieFu5G6iXaSx8f3Wv6j90IzA3g6Xr9M5xBm5qUN -IqzF+hUZuKAEMY1NcPlFTa2NlrkT8JdfQvJ+D5QrcBIMFmg9cKG5x9yD7MfHTJkf -ztMDFOwP3n7ahKRBowfe7/unAEFf6hYFtYjV+bqMDmBFVmk2CIVtjFgO9BNBQ/LB -zGcnwo2VigWBIjRDF5BgV0v+2g0PZGaxJ362RigZjzJojx3gYj6kaZYX8yb6ttGo -RPGt1A9woz6m0G0fLLMlce1dpbBAna14UVY7AEVt56UCggEAVvii/Oz3CINbHyB/ -GLYf8t3gdK03NPfr/FuWf4KQBYqz1txPYjsDARo7S2ifRTdn51186LIvgApmdtNH 
-DwP3alClnpIdclktJKJ6m8LQi1HNBpEkTBwWwY9/DODRQT2PJ1VPdsDUja/baIT5 -k3QTz3zo85FVFnyYyky2QsDjkfup9/PQ1h2P8fftNW29naKYff0PfVMCF+80u0y2 -t/zeNHQE/nb/3unhrg4tTiIHiYhsedrVli6BGXOrms6xpYVHK1cJi/JJq8kxaWz9 -ivkAURrgISSu+sleUJI5XMiCvt3AveJxDk2wX0Gyi/eksuqJjoMiaV7cWOIMpfkT -/h/U2QKCAQAFirvduXBiVpvvXccpCRG4CDe+bADKpfPIpYRAVzaiQ4GzzdlEoMGd -k3nV28fBjbdbme6ohgT6ilKi3HD2dkO1j5Et6Uz0g/T3tUdTXvycqeRJHXLiOgi9 -d8CGqR456KTF74nBe/whzoiJS9pVkm0cI/hQSz8lVZJu58SqxDewo4HcxV5FRiA6 -PRKtoCPU6Xac+kp4iRx6JwiuXQQQIS+ZovZKFDdiuu/L2gcZrp4eXym9zA+UcxQb -GUOCYEl9QCPQPLuM19w/Pj3TPXZyUlx81Q0Cka1NALzuc5bYhPKsot3iPrAJCmWV -L4XtNozCKI6pSg+CABwnp4/mL9nPFsX9AoIBAQDHiDhG9jtBdgtAEog6oL2Z98qR -u5+nONtLQ61I5R22eZYOgWfxnz08fTtpaHaVWNLNzF0ApyxjxD+zkFHcMJDUuHkR -O0yxUbCaof7u8EFtq8P9ux4xjtCnZW+9da0Y07zBrcXTsHYnAOiqNbtvVYd6RPiW -AaE61hgvj1c9/BQh2lUcroQx+yJI8uAAQrfYtXzm90rb6qk6rWy4li2ybMjB+LmP -cIQIXIUzdwE5uhBnwIre74cIZRXFJBqFY01+mT8ShPUWJkpOe0Fojrkl633TUuNf -9thZ++Fjvs4s7alFH5Hc7Ulk4v/O1+owdjqERd8zlu7+568C9s50CGwFnH0d +MIIJKQIBAAKCAgEAvkYFbD6p9fJ6V6Vgv9EMD8WTgYDwOVwFCAE6MFpcJUMwAmPr +ew365soG2mFZ7pj1JYslraa3xr9lNBmbZHkU9Kn2vB2vThRCCYvXXCEMKY77CRFR +5NjCypyJ0QdHA7Ghy3I76XCBjD3zdP9+nzeq01LhvD7WQnCsu0V2CiQp31QYi6Cz +wFMWoT/sLEVFdNiw3L+CPSmnjXUO2CSeXVR5892NyjZCifx/cObmIN8DJQzWgvz5 ++dFCP+vo/dcLgA6IGwXd27GIHNSnztiCbAM5pryloo7HZQ0sD94/9o9SF3seTKCo +5f61zcfx29g42jqWHBAPUmrIbog45Z7a5/lhcorZOmE89GcJbhAgQPg+5dgeMi8Z +gDgp/RaZvaE+TpvEmJ02gun2WwnKoZ+TdCdPk9ElWA7SaXOj9SxmjvvmP7KrzvIt +7k4vYetl3UpN+DtIo60wn03q2bYfDaPDKkrP7HaOURk6BceZhvlpRoj7jEb71+pz +zHdJygdAY26MXeoc7lLGdeS581RJVtZu/UGV7yl6IrIboAVC+1yzsoakSgXO0ZKV +oAHBK/E7wndok8ASiTg4jkgAYk3L3ZdoEwu3K3bvQULwm1Scv4kS5tW3y1jlwJwY +rsS1EdvjuYzQrwcSr4hTacCrr/b0prQhmWRvl8+hbNABkQhUqAs0e3j3qD0CAwEA +AQKCAgBnR3CoGbd9hZl8u4qxc5IdeXwgflFmgRlGCAyCtHlxzG9hzMTD7Ymz/hMM +NG1xQltGfqn8AROd8MPJLOEY/1QtnZgM8fv24K4bqmlCW7nTUQXYHSubkUDiY2e3 +K0ETszaETMRSaLwY2IOujQQ4/ilePY3D9UOtmqVXnVN+G7USwP31xEvtZ+xPqHfU +a+FQlFIj8FuMQXDuKozdK7s+I51yjl7pVNx3M7QlH1/olcSKNta1EQXK4RgZxD6a 
+kkBuyPR93ohXOJ0OMSvI7eKVKIcBh0JM4z0+D5FMJ7IGbjL8Bdsjcs1a0g/y28Xf +NBVf9w8Fun3mmYmj3ZMsqDZgVg/bAfP2z7O9kMzbuqmjelOz8HXxTm/+GIHuseMx +b/nDZgB0ZN+FhATv/onshJcjr2L3SJYzEWqjYiqaCQo5qtib+/kxh6SHPhAY2o8l +zzMhKFsJMhmwW91FXqeDS9FTlcRXtYH1EJxNGa01GpyVa6plvvFTGBNkEUJnVuEp +ULohJw0NJQYQOz5omYaQVJ49lpzVhwLEolgSlIBiM3s9nSDvVBYu+bB1ovw5OTIJ +Wlc9cBrYmdxYdAj5n6JzIC1wixgxrFw1jBm8cL/2FQYtR7daZabTMyZj5vAUqjxr +OV+uvkSFcIyBs1ty9TnnKC3yd5Ma+5chR5u7JPc1lSSor6AwQQKCAQEA4d5XrCq5 +EikGII/unhkVZsh9xmILp/4PRKc+fV7TFEpGyn8HFCBToZk6nXv99roUBdeZFobw +gDuZqBa4ougm2zgBbhdQXGaW4yZdChJlSs9yY7OAVvnG9gjuHGmWsLhvmhaeXSr2 +auxVGRaltr3r8hP9eHhloDM6qdSSAQpsdeTBQD8Ep3//aL/BLqGcF0gLrZLPwo0+ +cku8jQoVXSSOW1+YSaXRGxueuIR8lldU4I3yp2DO++DGLsOZoGFT/+ZXc2B4nE1h +o1hCWt6RKw0q2rCkZ+i6SiPGsVgb9xn6W8wHFIPA/0sOwOdtbKqKd0xwn5DnX+vt +d8shlRRUDF7HDQKCAQEA16gR/2n59HZiQQhHU9BCvGFi4nxlsuij+nqDx9fUerDU +fK79NaOuraWNkCqz+2lqfu5o3e3XNFHlVsj98SyfmTdMZ8Fj19awqN20nCOmfRkk +/MDuEzRzvNlOYBa0PpMkKJn2sahEiXGNVI4g3cGip1c5wJ1HL3jF61io4F/auBLP +grLtw8CoTqc6VpJUvsWFjopTmNdAze8WMf3vK6AKu7PKkXH7mFQZusacpO/E61Ud +euiG9BYDIIkrnWIQdLpODgliLZzPNcJDTKTFJAfIzr3WQvUaFc1+tHyX3XhpicvP +J4zyNfHd2dZMK1csXQJvFSnPgXpy531Wca0riAYZ8QKCAQEAhaVEBxE4dLBlebrw +nAeHjEuxcELvVrWTXzH+XbxP9T+F56eGDriaA5JhBnIpcWXlFxfc82FgyN97KeRX +17y50Riwb+3HlQT23u0CPEVqPfvFWY0KsWwV99qM2a74hRR8pJYhmksjh1zTdYbb +AugZxiFh53iF2Wa2nWq0AX2jc5apalRfcqTgAaEEs4zYiUYN8uRdnmZovsRliqae +wYAx44sK1vkQY5PSNKff+C0wgbY8ECHOF2eGnIEMU8ODKnWm5RP+Ca4Xyckdahsr +lmeyJbhDb2BbaicFGEZkNa/fXZW50r+q4OQOlMHbE2NNjw1hzmi1HyLAXhOJiWZ/ +3NnvuQKCAQEAg04a/zeocBcwhcYjn717FLX6/kmdpkwNo3G7EQ+xmK5YAj6Nf35U +2fel9PR7N4WcyQIiKZYp5PpEOA4SyChSWHiZ9caDIyTd1UOAN11hfmOz6I0Tp+/U +1FQ/azQHtN3kMzBjSxJYAJN56NTM4BiJD3iFemiIsjfH0h7eXBcg1djmLf8B06FX +GOSrGZDpNmqPghVpBvNwyrJbAj9Jw3cjcdvrZ5lOBhaWv+kz8Rzn+h2N4Ir5uF46 +szGxs5bEzD2vTs6Zz4ndhC7uyRi9y81Nj8t4TLZtln7TOdNup/Mr1zGXxM4Fn6DP +YlYfdHgUU+Eqf2lApeZHVfkzi+1TRvPoEQKCAQAELU/d33TNwQ/Ylo2VhwAscY3s +hv31O4tpu5koHHjOo3RDPzjuEfwy006u8NVAoj97LrU2n+XTIlnXf14TKuKWQ+8q 
+ajIVNj+ZAbD3djCmYXbIEL+u6aL4K1ENdjo6DNTGgPMfISE79WrmGBIKtB//uMqy +fGTUSPeo+R5WmTGN29YxAnRE/jtwOgAcicACTc0e9nghHj3c2raI0IazY5XFP0/h +LszTNUQzWx6DjWsbB+Ymuhu4fHZTYftCrIMpjmjC9pkNggeJnkxylQz/pwO73uWg +ycDgJhRyaVhM8sJXiBk+OC/ySP2Lxo60aPa514LEYJKQxUCukCTXth/6p0Qo -----END RSA PRIVATE KEY----- diff --git a/examples/examples_test.sh b/examples/examples_test.sh index 9015272f33e0..bead4d0dcbe1 100755 --- a/examples/examples_test.sh +++ b/examples/examples_test.sh @@ -20,6 +20,9 @@ set +e export TMPDIR=$(mktemp -d) trap "rm -rf ${TMPDIR}" EXIT +export SERVER_PORT=50051 +export UNIX_ADDR=abstract-unix-socket + clean () { for i in {1..10}; do jobs -p | xargs -n1 pkill -P @@ -49,45 +52,97 @@ EXAMPLES=( "helloworld" "route_guide" "features/authentication" + "features/authz" + "features/cancellation" "features/compression" "features/deadline" "features/encryption/TLS" - "features/errors" + "features/error_details" + "features/error_handling" "features/interceptor" "features/load_balancing" "features/metadata" + "features/metadata_interceptor" "features/multiplex" "features/name_resolving" + "features/orca" + "features/retry" + "features/unix_abstract" +) + +declare -A SERVER_ARGS=( + ["features/unix_abstract"]="-addr $UNIX_ADDR" + ["default"]="-port $SERVER_PORT" +) + +declare -A CLIENT_ARGS=( + ["features/unix_abstract"]="-addr $UNIX_ADDR" + ["features/orca"]="-test=true" + ["default"]="-addr localhost:$SERVER_PORT" ) +declare -A SERVER_WAIT_COMMAND=( + ["features/unix_abstract"]="lsof -U | grep $UNIX_ADDR" + ["default"]="lsof -i :$SERVER_PORT | grep $SERVER_PORT" +) + +wait_for_server () { + example=$1 + wait_command=${SERVER_WAIT_COMMAND[$example]:-${SERVER_WAIT_COMMAND["default"]}} + echo "$(tput setaf 4) waiting for server to start $(tput sgr 0)" + for i in {1..10}; do + eval "$wait_command" 2>&1 &>/dev/null + if [ $? 
-eq 0 ]; then + pass "server started" + return + fi + sleep 1 + done + fail "cannot determine if server started" +} + declare -A EXPECTED_SERVER_OUTPUT=( ["helloworld"]="Received: world" ["route_guide"]="" ["features/authentication"]="server starting on port 50051..." + ["features/authz"]="unary echoing message \"hello world\"" + ["features/cancellation"]="server: error receiving from stream: rpc error: code = Canceled desc = context canceled" ["features/compression"]="UnaryEcho called with message \"compress\"" ["features/deadline"]="" ["features/encryption/TLS"]="" - ["features/errors"]="" + ["features/error_details"]="" + ["features/error_handling"]="" ["features/interceptor"]="unary echoing message \"hello world\"" ["features/load_balancing"]="serving on :50051" ["features/metadata"]="message:\"this is examples/metadata\", sending echo" + ["features/metadata_interceptor"]="key1 from metadata: " ["features/multiplex"]=":50051" ["features/name_resolving"]="serving on localhost:50051" + ["features/orca"]="Server listening" + ["features/retry"]="request succeeded count: 4" + ["features/unix_abstract"]="serving on @abstract-unix-socket" ) declare -A EXPECTED_CLIENT_OUTPUT=( ["helloworld"]="Greeting: Hello world" ["route_guide"]="Feature: name: \"\", point:(416851321, -742674555)" ["features/authentication"]="UnaryEcho: hello world" + ["features/authz"]="UnaryEcho: hello world" + ["features/cancellation"]="cancelling context" ["features/compression"]="UnaryEcho call returned \"compress\", " ["features/deadline"]="wanted = DeadlineExceeded, got = DeadlineExceeded" ["features/encryption/TLS"]="UnaryEcho: hello world" - ["features/errors"]="Greeting: Hello world" + ["features/error_details"]="Greeting: Hello world" + ["features/error_handling"]="Received error" ["features/interceptor"]="UnaryEcho: hello world" ["features/load_balancing"]="calling helloworld.Greeter/SayHello with pick_first" ["features/metadata"]="this is examples/metadata" + 
["features/metadata_interceptor"]="BidiStreaming Echo: hello world" ["features/multiplex"]="Greeting: Hello multiplex" ["features/name_resolving"]="calling helloworld.Greeter/SayHello to \"example:///resolver.example.grpc.io\"" + ["features/orca"]="Per-call load report received: map\[db_queries:10\]" + ["features/retry"]="UnaryEcho reply: message:\"Try and Success\"" + ["features/unix_abstract"]="calling echo.Echo/UnaryEcho to unix-abstract:abstract-unix-socket" ) cd ./examples @@ -102,6 +157,13 @@ for example in ${EXAMPLES[@]}; do pass "successfully built server" fi + # Start server + SERVER_LOG="$(mktemp)" + server_args=${SERVER_ARGS[$example]:-${SERVER_ARGS["default"]}} + go run ./$example/*server/*.go $server_args &> $SERVER_LOG & + + wait_for_server $example + # Build client if ! go build -o /dev/null ./${example}/*client/*.go; then fail "failed to build client" @@ -109,12 +171,10 @@ for example in ${EXAMPLES[@]}; do pass "successfully built client" fi - # Start server - SERVER_LOG="$(mktemp)" - go run ./$example/*server/*.go &> $SERVER_LOG & - + # Start client CLIENT_LOG="$(mktemp)" - if ! timeout 20 go run ${example}/*client/*.go &> $CLIENT_LOG; then + client_args=${CLIENT_ARGS[$example]:-${CLIENT_ARGS["default"]}} + if ! timeout 20 go run ${example}/*client/*.go $client_args &> $CLIENT_LOG; then fail "client failed to communicate with server got server log: $(cat $SERVER_LOG) diff --git a/examples/features/authentication/README.md b/examples/features/authentication/README.md index 0ba3f9469fc5..57028b8795d3 100644 --- a/examples/features/authentication/README.md +++ b/examples/features/authentication/README.md @@ -29,9 +29,9 @@ https://godoc.org/google.golang.org/grpc/credentials/oauth for details. #### Client -On client side, users should first get a valid oauth token, and then call -[`credentials.NewOauthAccess`](https://godoc.org/google.golang.org/grpc/credentials/oauth#NewOauthAccess) -to initialize a `credentials.PerRPCCredentials` with it. 
Next, if user wants to +On client side, users should first get a valid oauth token, and then initialize a +[`oauth.TokenSource`](https://godoc.org/google.golang.org/grpc/credentials/oauth#TokenSource) +which implements `credentials.PerRPCCredentials`. Next, if user wants to apply a single OAuth token for all RPC calls on the same connection, then configure grpc `Dial` with `DialOption` [`WithPerRPCCredentials`](https://godoc.org/google.golang.org/grpc#WithPerRPCCredentials). diff --git a/examples/features/authentication/client/main.go b/examples/features/authentication/client/main.go index 0c5c9d948e37..a189b4be8cfa 100644 --- a/examples/features/authentication/client/main.go +++ b/examples/features/authentication/client/main.go @@ -50,7 +50,7 @@ func main() { flag.Parse() // Set up the credentials for the connection. - perRPC := oauth.NewOauthAccess(fetchToken()) + perRPC := oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(fetchToken())} creds, err := credentials.NewClientTLSFromFile(data.Path("x509/ca_cert.pem"), "x.test.example.com") if err != nil { log.Fatalf("failed to load credentials: %v", err) @@ -61,12 +61,11 @@ func main() { // itself. // See: https://godoc.org/google.golang.org/grpc#PerRPCCredentials grpc.WithPerRPCCredentials(perRPC), - // oauth.NewOauthAccess requires the configuration of transport + // oauth.TokenSource requires the configuration of transport // credentials. grpc.WithTransportCredentials(creds), } - opts = append(opts, grpc.WithBlock()) conn, err := grpc.Dial(*addr, opts...) if err != nil { log.Fatalf("did not connect: %v", err) diff --git a/examples/features/authz/README.md b/examples/features/authz/README.md new file mode 100644 index 000000000000..498beb367f1e --- /dev/null +++ b/examples/features/authz/README.md @@ -0,0 +1,40 @@ +# RBAC authorization + +This example uses the `StaticInterceptor` from the `google.golang.org/grpc/authz` +package. It uses a header based RBAC policy to match each gRPC method to a +required role. 
For simplicity, the context is injected with mock metadata which +includes the required roles, but this should be fetched from an appropriate +service based on the authenticated context. + +## Try it + +Server requires the following roles on an authenticated user to authorize usage +of these methods: + +- `UnaryEcho` requires the role `UNARY_ECHO:W` +- `BidirectionalStreamingEcho` requires the role `STREAM_ECHO:RW` + +Upon receiving a request, the server first checks that a token was supplied, +decodes it and checks that a secret is correctly set (hardcoded to `super-secret` +for simplicity, this should use a proper ID provider in production). + +If the above is successful, it uses the username in the token to set appropriate +roles (hardcoded to the 2 required roles above if the username matches `super-user` +for simplicity, these roles should be supplied externally as well). + +Start the server with: + +``` +go run server/main.go +``` + +The client implementation shows how using a valid token (setting username and +secret) with each of the endpoints will return successfully. It also exemplifies +how using a bad token will result in `codes.PermissionDenied` being returned +from the service. + +Start the client with: + +``` +go run client/main.go +``` diff --git a/examples/features/authz/client/main.go b/examples/features/authz/client/main.go new file mode 100644 index 000000000000..2654314e5e11 --- /dev/null +++ b/examples/features/authz/client/main.go @@ -0,0 +1,140 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client. +package main + +import ( + "context" + "flag" + "fmt" + "io" + "log" + "time" + + "golang.org/x/oauth2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/oauth" + "google.golang.org/grpc/examples/data" + "google.golang.org/grpc/examples/features/authz/token" + ecpb "google.golang.org/grpc/examples/features/proto/echo" + "google.golang.org/grpc/status" +) + +var addr = flag.String("addr", "localhost:50051", "the address to connect to") + +func callUnaryEcho(ctx context.Context, client ecpb.EchoClient, message string, opts ...grpc.CallOption) error { + resp, err := client.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}, opts...) + if err != nil { + return status.Errorf(status.Code(err), "UnaryEcho RPC failed: %v", err) + } + fmt.Println("UnaryEcho: ", resp.Message) + return nil +} + +func callBidiStreamingEcho(ctx context.Context, client ecpb.EchoClient, opts ...grpc.CallOption) error { + c, err := client.BidirectionalStreamingEcho(ctx, opts...) + if err != nil { + return status.Errorf(status.Code(err), "BidirectionalStreamingEcho RPC failed: %v", err) + } + for i := 0; i < 5; i++ { + err := c.Send(&ecpb.EchoRequest{Message: fmt.Sprintf("Request %d", i+1)}) + if err == io.EOF { + // Bidi streaming RPC errors happen and make Send return io.EOF, + // not the RPC error itself. Call Recv to determine the error. + break + } + if err != nil { + // Some local errors are reported this way, e.g. errors serializing + // the request message. 
+ return status.Errorf(status.Code(err), "sending StreamingEcho message: %v", err) + } + } + c.CloseSend() + for { + resp, err := c.Recv() + if err == io.EOF { + break + } + if err != nil { + return status.Errorf(status.Code(err), "receiving StreamingEcho message: %v", err) + } + fmt.Println("BidiStreaming Echo: ", resp.Message) + } + return nil +} + +func newCredentialsCallOption(t token.Token) grpc.CallOption { + tokenBase64, err := t.Encode() + if err != nil { + log.Fatalf("encoding token: %v", err) + } + oath2Token := oauth2.Token{AccessToken: tokenBase64} + return grpc.PerRPCCredentials(oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(&oath2Token)}) +} + +func main() { + flag.Parse() + + // Create tls based credential. + creds, err := credentials.NewClientTLSFromFile(data.Path("x509/ca_cert.pem"), "x.test.example.com") + if err != nil { + log.Fatalf("failed to load credentials: %v", err) + } + // Set up a connection to the server. + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds)) + if err != nil { + log.Fatalf("grpc.Dial(%q): %v", *addr, err) + } + defer conn.Close() + + // Make an echo client and send RPCs. + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + client := ecpb.NewEchoClient(conn) + + // Make RPCs as an authorized user and expect them to succeed. + authorisedUserTokenCallOption := newCredentialsCallOption(token.Token{Username: "super-user", Secret: "super-secret"}) + if err := callUnaryEcho(ctx, client, "hello world", authorisedUserTokenCallOption); err != nil { + log.Fatalf("Unary RPC by authorized user failed: %v", err) + } + if err := callBidiStreamingEcho(ctx, client, authorisedUserTokenCallOption); err != nil { + log.Fatalf("Bidirectional RPC by authorized user failed: %v", err) + } + + // Make RPCs as an unauthorized user and expect them to fail with status code PermissionDenied. 
+ unauthorisedUserTokenCallOption := newCredentialsCallOption(token.Token{Username: "bad-actor", Secret: "super-secret"}) + if err := callUnaryEcho(ctx, client, "hello world", unauthorisedUserTokenCallOption); err != nil { + switch c := status.Code(err); c { + case codes.PermissionDenied: + log.Printf("Unary RPC by unauthorized user failed as expected: %v", err) + default: + log.Fatalf("Unary RPC by unauthorized user failed unexpectedly: %v, %v", c, err) + } + } + if err := callBidiStreamingEcho(ctx, client, unauthorisedUserTokenCallOption); err != nil { + switch c := status.Code(err); c { + case codes.PermissionDenied: + log.Printf("Bidirectional RPC by unauthorized user failed as expected: %v", err) + default: + log.Fatalf("Bidirectional RPC by unauthorized user failed unexpectedly: %v", err) + } + } +} diff --git a/examples/features/authz/server/main.go b/examples/features/authz/server/main.go new file mode 100644 index 000000000000..e06ddbdf153e --- /dev/null +++ b/examples/features/authz/server/main.go @@ -0,0 +1,215 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server. 
+package main + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + "log" + "net" + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/authz" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/examples/data" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "google.golang.org/grpc/examples/features/authz/token" + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +const ( + unaryEchoWriterRole = "UNARY_ECHO:W" + streamEchoReadWriterRole = "STREAM_ECHO:RW" + authzPolicy = ` + { + "name": "authz", + "allow_rules": [ + { + "name": "allow_UnaryEcho", + "request": { + "paths": ["/grpc.examples.echo.Echo/UnaryEcho"], + "headers": [ + { + "key": "UNARY_ECHO:W", + "values": ["true"] + } + ] + } + }, + { + "name": "allow_BidirectionalStreamingEcho", + "request": { + "paths": ["/grpc.examples.echo.Echo/BidirectionalStreamingEcho"], + "headers": [ + { + "key": "STREAM_ECHO:RW", + "values": ["true"] + } + ] + } + } + ], + "deny_rules": [] + } + ` +) + +var ( + port = flag.Int("port", 50051, "the port to serve on") + + errMissingMetadata = status.Errorf(codes.InvalidArgument, "missing metadata") +) + +func newContextWithRoles(ctx context.Context, username string) context.Context { + md := metadata.MD{} + if username == "super-user" { + md.Set(unaryEchoWriterRole, "true") + md.Set(streamEchoReadWriterRole, "true") + } + return metadata.NewIncomingContext(ctx, md) +} + +type server struct { + pb.UnimplementedEchoServer +} + +func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) { + fmt.Printf("unary echoing message %q\n", in.Message) + return &pb.EchoResponse{Message: in.Message}, nil +} + +func (s *server) BidirectionalStreamingEcho(stream pb.Echo_BidirectionalStreamingEchoServer) error { + for { + in, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil + } + fmt.Printf("Receiving message from stream: %v\n", err) + 
return err + } + fmt.Printf("bidi echoing message %q\n", in.Message) + stream.Send(&pb.EchoResponse{Message: in.Message}) + } +} + +// isAuthenticated validates the authorization. +func isAuthenticated(authorization []string) (username string, err error) { + if len(authorization) < 1 { + return "", errors.New("received empty authorization token from client") + } + tokenBase64 := strings.TrimPrefix(authorization[0], "Bearer ") + // Perform the token validation here. For the sake of this example, the code + // here forgoes any of the usual OAuth2 token validation and instead checks + // for a token matching an arbitrary string. + var token token.Token + err = token.Decode(tokenBase64) + if err != nil { + return "", fmt.Errorf("base64 decoding of received token %q: %v", tokenBase64, err) + } + if token.Secret != "super-secret" { + return "", fmt.Errorf("received token %q does not match expected %q", token.Secret, "super-secret") + } + return token.Username, nil +} + +// authUnaryInterceptor looks up the authorization header from the incoming RPC context, +// retrieves the username from it and creates a new context with the username before invoking +// the provided handler. 
+func authUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errMissingMetadata + } + username, err := isAuthenticated(md["authorization"]) + if err != nil { + return nil, status.Error(codes.Unauthenticated, err.Error()) + } + return handler(newContextWithRoles(ctx, username), req) +} + +// wrappedStream wraps a grpc.ServerStream associated with an incoming RPC, and +// a custom context containing the username derived from the authorization header +// specified in the incoming RPC metadata +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedStream) Context() context.Context { + return w.ctx +} + +func newWrappedStream(ctx context.Context, s grpc.ServerStream) grpc.ServerStream { + return &wrappedStream{s, ctx} +} + +// authStreamInterceptor looks up the authorization header from the incoming RPC context, +// retrieves the username from it and creates a new context with the username before invoking +// the provided handler. +func authStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + md, ok := metadata.FromIncomingContext(ss.Context()) + if !ok { + return errMissingMetadata + } + username, err := isAuthenticated(md["authorization"]) + if err != nil { + return status.Error(codes.Unauthenticated, err.Error()) + } + return handler(srv, newWrappedStream(newContextWithRoles(ss.Context(), username), ss)) +} + +func main() { + flag.Parse() + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("Listening on local port %q: %v", *port, err) + } + + // Create tls based credential. 
+ creds, err := credentials.NewServerTLSFromFile(data.Path("x509/server_cert.pem"), data.Path("x509/server_key.pem")) + if err != nil { + log.Fatalf("Loading credentials: %v", err) + } + + // Create an authorization interceptor using a static policy. + staticInteceptor, err := authz.NewStatic(authzPolicy) + if err != nil { + log.Fatalf("Creating a static authz interceptor: %v", err) + } + unaryInts := grpc.ChainUnaryInterceptor(authUnaryInterceptor, staticInteceptor.UnaryInterceptor) + streamInts := grpc.ChainStreamInterceptor(authStreamInterceptor, staticInteceptor.StreamInterceptor) + s := grpc.NewServer(grpc.Creds(creds), unaryInts, streamInts) + + // Register EchoServer on the server. + pb.RegisterEchoServer(s, &server{}) + + if err := s.Serve(lis); err != nil { + log.Fatalf("Serving Echo service on local port: %v", err) + } +} diff --git a/examples/features/authz/token/token.go b/examples/features/authz/token/token.go new file mode 100644 index 000000000000..4899f3b08658 --- /dev/null +++ b/examples/features/authz/token/token.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package token implements an example of authorization token encoding/decoding +// that can be used in RPC headers. 
+package token + +import ( + "encoding/base64" + "encoding/json" +) + +// Token is a mock authorization token sent by the client as part of the RPC headers, +// and used by the server for authorization against a predefined policy. +type Token struct { + // Secret is used by the server to authenticate the user + Secret string `json:"secret"` + // Username is used by the server to assign roles in the metadata for authorization + Username string `json:"username"` +} + +// Encode returns a base64 encoded version of the JSON representation of token. +func (t *Token) Encode() (string, error) { + barr, err := json.Marshal(t) + if err != nil { + return "", err + } + s := base64.StdEncoding.EncodeToString(barr) + return s, nil +} + +// Decode updates the internals of Token using the passed in base64 +// encoded version of the JSON representation of token. +func (t *Token) Decode(s string) error { + barr, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return err + } + return json.Unmarshal(barr, t) +} diff --git a/examples/features/cancellation/client/main.go b/examples/features/cancellation/client/main.go index 58bd4b6f0180..248619f7a617 100644 --- a/examples/features/cancellation/client/main.go +++ b/examples/features/cancellation/client/main.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/status" ) @@ -55,7 +56,7 @@ func main() { flag.Parse() // Set up a connection to the server. 
- conn, err := grpc.Dial(*addr, grpc.WithInsecure()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/compression/client/main.go b/examples/features/compression/client/main.go index df6d825a3ee5..24c3bbac1089 100644 --- a/examples/features/compression/client/main.go +++ b/examples/features/compression/client/main.go @@ -27,6 +27,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" // Install the gzip compressor pb "google.golang.org/grpc/examples/features/proto/echo" ) @@ -37,7 +38,7 @@ func main() { flag.Parse() // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/deadline/client/main.go b/examples/features/deadline/client/main.go index 026ce96f429a..8a4e3a2d26cc 100644 --- a/examples/features/deadline/client/main.go +++ b/examples/features/deadline/client/main.go @@ -28,6 +28,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/status" ) @@ -72,7 +73,7 @@ func streamingCall(c pb.EchoClient, requestID int, message string, want codes.Co func main() { flag.Parse() - conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/deadline/server/main.go b/examples/features/deadline/server/main.go index 11cd47a6b5b3..ce3fc61679fc 100644 --- a/examples/features/deadline/server/main.go +++ 
b/examples/features/deadline/server/main.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" pb "google.golang.org/grpc/examples/features/proto/echo" @@ -94,7 +95,7 @@ func (s *server) Close() { func newEchoServer() *server { target := fmt.Sprintf("localhost:%v", *port) - cc, err := grpc.Dial(target, grpc.WithInsecure()) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/debugging/client/main.go b/examples/features/debugging/client/main.go index faf6b5d5fa2f..09acfa8112e5 100644 --- a/examples/features/debugging/client/main.go +++ b/examples/features/debugging/client/main.go @@ -21,13 +21,14 @@ package main import ( "context" + "flag" "log" "net" - "os" "time" "google.golang.org/grpc" "google.golang.org/grpc/channelz/service" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" @@ -38,9 +39,15 @@ const ( defaultName = "world" ) +var ( + addr = flag.String("addr", "localhost:50051", "the address to connect to") + name = flag.String("name", defaultName, "Name to greet") +) + func main() { + flag.Parse() /***** Set up the server serving channelz service. *****/ - lis, err := net.Listen("tcp", ":50052") + lis, err := net.Listen("tcp", *addr) if err != nil { log.Fatalf("failed to listen: %v", err) } @@ -53,7 +60,7 @@ func main() { /***** Initialize manual resolver and Dial *****/ r := manual.NewBuilderWithScheme("whatever") // Set up a connection to the server. 
- conn, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`)) + conn, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r), grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`)) if err != nil { log.Fatalf("did not connect: %v", err) } @@ -64,17 +71,13 @@ func main() { c := pb.NewGreeterClient(conn) // Contact the server and print out its response. - name := defaultName - if len(os.Args) > 1 { - name = os.Args[1] - } /***** Make 100 SayHello RPCs *****/ for i := 0; i < 100; i++ { // Setting a 150ms timeout on the RPC. ctx, cancel := context.WithTimeout(context.Background(), 150*time.Millisecond) defer cancel() - r, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) + r, err := c.SayHello(ctx, &pb.HelloRequest{Name: *name}) if err != nil { log.Printf("could not greet: %v", err) } else { diff --git a/examples/features/encryption/ALTS/client/main.go b/examples/features/encryption/ALTS/client/main.go index e2654f5865ff..aa090807ba34 100644 --- a/examples/features/encryption/ALTS/client/main.go +++ b/examples/features/encryption/ALTS/client/main.go @@ -50,7 +50,7 @@ func main() { altsTC := alts.NewClientCreds(alts.DefaultClientOptions()) // Set up a connection to the server. 
- conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(altsTC), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(altsTC)) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/encryption/README.md b/examples/features/encryption/README.md index a00188d66a2d..e4ce22933230 100644 --- a/examples/features/encryption/README.md +++ b/examples/features/encryption/README.md @@ -42,8 +42,8 @@ configure TLS and create the server credential using On client side, we provide the path to the "ca_cert.pem" to configure TLS and create the client credential using [`credentials.NewClientTLSFromFile`](https://godoc.org/google.golang.org/grpc/credentials#NewClientTLSFromFile). -Note that we override the server name with "x.test.youtube.com", as the server -certificate is valid for *.test.youtube.com but not localhost. It is solely for +Note that we override the server name with "x.test.example.com", as the server +certificate is valid for *.test.example.com but not localhost. It is solely for the convenience of making an example. Once the credentials have been created at both sides, we can start the server @@ -84,4 +84,23 @@ Next, same as TLS, start the server with the server credential and let client dial to server with the client credential. Finally, make an RPC to test the secure connection based upon ALTS is -successfully up. \ No newline at end of file +successfully up. + +### mTLS + +In mutual TLS (mTLS), the client and the server authenticate each other. gRPC +allows users to configure mutual TLS at the connection level. + +In normal TLS, the server is only concerned with presenting the server +certificate for clients to verify. In mutual TLS, the server also loads in a +list of trusted CA files for verifying client presented certificates with. 
+This is done via setting +[`tls.Config.ClientCAs`](https://pkg.go.dev/crypto/tls#Config.ClientCAs) +to the list of trusted CA files, +and setting [`tls.config.ClientAuth`](https://pkg.go.dev/crypto/tls#Config.ClientAuth) +to [`tls.RequireAndVerifyClientCert`](https://pkg.go.dev/crypto/tls#RequireAndVerifyClientCert). + +In normal TLS, the client is only concerned with authenticating the server by +using one or more trusted CA file. In mutual TLS, the client also presents its +client certificate to the server for authentication. This is done via setting +[`tls.Config.Certificates`](https://pkg.go.dev/crypto/tls#Config.Certificates). diff --git a/examples/features/encryption/TLS/client/main.go b/examples/features/encryption/TLS/client/main.go index 718196b1bb41..4f78ccca0366 100644 --- a/examples/features/encryption/TLS/client/main.go +++ b/examples/features/encryption/TLS/client/main.go @@ -54,7 +54,7 @@ func main() { } // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds)) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/encryption/mTLS/client/main.go b/examples/features/encryption/mTLS/client/main.go new file mode 100644 index 000000000000..4bc1ef06defa --- /dev/null +++ b/examples/features/encryption/mTLS/client/main.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client which connects to the server using mTLS. +package main + +import ( + "context" + "crypto/tls" + "crypto/x509" + "flag" + "fmt" + "log" + "os" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/examples/data" + ecpb "google.golang.org/grpc/examples/features/proto/echo" +) + +var addr = flag.String("addr", "localhost:50051", "the address to connect to") + +func callUnaryEcho(client ecpb.EchoClient, message string) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + resp, err := client.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) + if err != nil { + log.Fatalf("client.UnaryEcho(_) = _, %v: ", err) + } + fmt.Println("UnaryEcho: ", resp.Message) +} + +func main() { + flag.Parse() + + cert, err := tls.LoadX509KeyPair(data.Path("x509/client_cert.pem"), data.Path("x509/client_key.pem")) + if err != nil { + log.Fatalf("failed to load client cert: %v", err) + } + + ca := x509.NewCertPool() + caFilePath := data.Path("x509/ca_cert.pem") + caBytes, err := os.ReadFile(caFilePath) + if err != nil { + log.Fatalf("failed to read ca cert %q: %v", caFilePath, err) + } + if ok := ca.AppendCertsFromPEM(caBytes); !ok { + log.Fatalf("failed to parse %q", caFilePath) + } + + tlsConfig := &tls.Config{ + ServerName: "x.test.example.com", + Certificates: []tls.Certificate{cert}, + RootCAs: ca, + } + + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + if err != nil { + log.Fatalf("did not connect: %v", err) + } + defer conn.Close() + + callUnaryEcho(ecpb.NewEchoClient(conn), "hello world") +} diff --git a/examples/features/encryption/mTLS/server/main.go b/examples/features/encryption/mTLS/server/main.go new file mode 100644 index 000000000000..edd6829dcb91 --- /dev/null +++ 
b/examples/features/encryption/mTLS/server/main.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server which authenticates clients using mTLS. +package main + +import ( + "context" + "crypto/tls" + "crypto/x509" + "flag" + "fmt" + "log" + "net" + "os" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/examples/data" + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var port = flag.Int("port", 50051, "the port to serve on") + +type ecServer struct { + pb.UnimplementedEchoServer +} + +func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { + return &pb.EchoResponse{Message: req.Message}, nil +} + +func main() { + flag.Parse() + log.Printf("server starting on port %d...\n", *port) + + cert, err := tls.LoadX509KeyPair(data.Path("x509/server_cert.pem"), data.Path("x509/server_key.pem")) + if err != nil { + log.Fatalf("failed to load key pair: %s", err) + } + + ca := x509.NewCertPool() + caFilePath := data.Path("x509/client_ca_cert.pem") + caBytes, err := os.ReadFile(caFilePath) + if err != nil { + log.Fatalf("failed to read ca cert %q: %v", caFilePath, err) + } + if ok := ca.AppendCertsFromPEM(caBytes); !ok { + log.Fatalf("failed to parse %q", caFilePath) + } + + tlsConfig := &tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + Certificates: 
[]tls.Certificate{cert}, + ClientCAs: ca, + } + + s := grpc.NewServer(grpc.Creds(credentials.NewTLS(tlsConfig))) + pb.RegisterEchoServer(s, &ecServer{}) + lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/examples/features/errors/README.md b/examples/features/error_details/README.md similarity index 100% rename from examples/features/errors/README.md rename to examples/features/error_details/README.md diff --git a/examples/features/errors/client/main.go b/examples/features/error_details/client/main.go similarity index 92% rename from examples/features/errors/client/main.go rename to examples/features/error_details/client/main.go index b87fb48a9bbd..7f905f82bef3 100644 --- a/examples/features/errors/client/main.go +++ b/examples/features/error_details/client/main.go @@ -28,6 +28,7 @@ import ( epb "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/helloworld/helloworld" "google.golang.org/grpc/status" ) @@ -38,7 +39,7 @@ func main() { flag.Parse() // Set up a connection to the server. 
- conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/errors/server/main.go b/examples/features/error_details/server/main.go similarity index 100% rename from examples/features/errors/server/main.go rename to examples/features/error_details/server/main.go diff --git a/examples/features/error_handling/README.md b/examples/features/error_handling/README.md new file mode 100644 index 000000000000..c6c4ba2c2e2d --- /dev/null +++ b/examples/features/error_handling/README.md @@ -0,0 +1,22 @@ +# Description + +This example demonstrates basic RPC error handling in gRPC. + +# Run the sample code + +Run the server, which returns an error if the RPC request's `Name` field is +empty. + +```sh +$ go run ./server/main.go +``` + +Then run the client in another terminal, which does two requests: one with an +empty Name field and one with it populated with the current username provided by +os/user. + +```sh +$ go run ./client/main.go +``` + +It should print the status codes it received from the server. diff --git a/examples/features/error_handling/client/main.go b/examples/features/error_handling/client/main.go new file mode 100644 index 000000000000..bd4ec0a1d33f --- /dev/null +++ b/examples/features/error_handling/client/main.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client. +package main + +import ( + "context" + "flag" + "log" + "os/user" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + pb "google.golang.org/grpc/examples/helloworld/helloworld" + "google.golang.org/grpc/status" +) + +var addr = flag.String("addr", "localhost:50052", "the address to connect to") + +func main() { + flag.Parse() + + name := "unknown" + if u, err := user.Current(); err == nil && u.Username != "" { + name = u.Username + } + + // Set up a connection to the server. + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Fatalf("Failed to connect: %v", err) + } + defer conn.Close() + c := pb.NewGreeterClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + for _, reqName := range []string{"", name} { + log.Printf("Calling SayHello with Name:%q", reqName) + r, err := c.SayHello(ctx, &pb.HelloRequest{Name: reqName}) + if err != nil { + if status.Code(err) != codes.InvalidArgument { + log.Printf("Received unexpected error: %v", err) + continue + } + log.Printf("Received error: %v", err) + continue + } + log.Printf("Received response: %s", r.Message) + } +} diff --git a/examples/features/error_handling/server/main.go b/examples/features/error_handling/server/main.go new file mode 100644 index 000000000000..4471c560add9 --- /dev/null +++ b/examples/features/error_handling/server/main.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server. +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + pb "google.golang.org/grpc/examples/helloworld/helloworld" +) + +var port = flag.Int("port", 50052, "port number") + +// server is used to implement helloworld.GreeterServer. +type server struct { + pb.UnimplementedGreeterServer +} + +// SayHello implements helloworld.GreeterServer. +func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + if in.Name == "" { + return nil, status.Errorf(codes.InvalidArgument, "request missing required field: Name") + } + return &pb.HelloReply{Message: "Hello " + in.Name}, nil +} + +func main() { + flag.Parse() + + address := fmt.Sprintf(":%v", *port) + lis, err := net.Listen("tcp", address) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + + s := grpc.NewServer() + pb.RegisterGreeterServer(s, &server{}) + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/examples/features/health/client/main.go b/examples/features/health/client/main.go index 9cbc03f90a47..63b4717b5257 100644 --- a/examples/features/health/client/main.go +++ b/examples/features/health/client/main.go @@ -27,6 +27,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" _ "google.golang.org/grpc/health" "google.golang.org/grpc/resolver" @@ 
-65,7 +66,7 @@ func main() { address := fmt.Sprintf("%s:///unused", r.Scheme()) options := []grpc.DialOption{ - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), grpc.WithResolvers(r), grpc.WithDefaultServiceConfig(serviceConfig), @@ -73,7 +74,7 @@ func main() { conn, err := grpc.Dial(address, options...) if err != nil { - log.Fatalf("did not connect %v", err) + log.Fatalf("grpc.Dial(%q): %v", address, err) } defer conn.Close() diff --git a/examples/features/health/server/main.go b/examples/features/health/server/main.go index 3f79c8ba3470..65039b38d5be 100644 --- a/examples/features/health/server/main.go +++ b/examples/features/health/server/main.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/health" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" ) @@ -62,7 +63,7 @@ func main() { s := grpc.NewServer() healthcheck := health.NewServer() - healthpb.RegisterHealthServer(s, healthcheck) + healthgrpc.RegisterHealthServer(s, healthcheck) pb.RegisterEchoServer(s, &echoServer{}) go func() { diff --git a/examples/features/interceptor/client/main.go b/examples/features/interceptor/client/main.go index 0c2015169d17..0832e4861cdd 100644 --- a/examples/features/interceptor/client/main.go +++ b/examples/features/interceptor/client/main.go @@ -55,9 +55,9 @@ func unaryInterceptor(ctx context.Context, method string, req, reply interface{} } } if !credsConfigured { - opts = append(opts, grpc.PerRPCCredentials(oauth.NewOauthAccess(&oauth2.Token{ - AccessToken: fallbackToken, - }))) + opts = append(opts, grpc.PerRPCCredentials(oauth.TokenSource{ + TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: fallbackToken}), + })) } start := time.Now() err := invoker(ctx, method, req, reply, cc, opts...) 
@@ -97,9 +97,9 @@ func streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.Clie } } if !credsConfigured { - opts = append(opts, grpc.PerRPCCredentials(oauth.NewOauthAccess(&oauth2.Token{ - AccessToken: fallbackToken, - }))) + opts = append(opts, grpc.PerRPCCredentials(oauth.TokenSource{ + TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: fallbackToken}), + })) } s, err := streamer(ctx, desc, cc, method, opts...) if err != nil { @@ -153,7 +153,7 @@ func main() { } // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds), grpc.WithUnaryInterceptor(unaryInterceptor), grpc.WithStreamInterceptor(streamInterceptor), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(creds), grpc.WithUnaryInterceptor(unaryInterceptor), grpc.WithStreamInterceptor(streamInterceptor)) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/interceptor/server/main.go b/examples/features/interceptor/server/main.go index 1b07cdecd6ca..78b87aae3472 100644 --- a/examples/features/interceptor/server/main.go +++ b/examples/features/interceptor/server/main.go @@ -98,7 +98,7 @@ func unaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServ } m, err := handler(ctx, req) if err != nil { - logger("RPC failed with error %v", err) + logger("RPC failed with error: %v", err) } return m, err } @@ -135,7 +135,7 @@ func streamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamS err := handler(srv, newWrappedStream(ss)) if err != nil { - logger("RPC failed with error %v", err) + logger("RPC failed with error: %v", err) } return err } diff --git a/examples/features/keepalive/client/main.go b/examples/features/keepalive/client/main.go index a8cfbc5c4541..feb9b664bf4e 100644 --- a/examples/features/keepalive/client/main.go +++ b/examples/features/keepalive/client/main.go @@ -27,6 +27,7 @@ import ( "time" "google.golang.org/grpc" + 
"google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/keepalive" ) @@ -42,7 +43,7 @@ var kacp = keepalive.ClientParameters{ func main() { flag.Parse() - conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithKeepaliveParams(kacp)) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithKeepaliveParams(kacp)) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/load_balancing/client/main.go b/examples/features/load_balancing/client/main.go index 1578df16671b..6e3d1fc86fe3 100644 --- a/examples/features/load_balancing/client/main.go +++ b/examples/features/load_balancing/client/main.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ecpb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/resolver" ) @@ -55,11 +56,10 @@ func makeRPCs(cc *grpc.ClientConn, n int) { } func main() { - // "pick_first" is the default, so there's no need to set the load balancer. + // "pick_first" is the default, so there's no need to set the load balancing policy. pickfirstConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), - grpc.WithInsecure(), - grpc.WithBlock(), + grpc.WithTransportCredentials(insecure.NewCredentials()), ) if err != nil { log.Fatalf("did not connect: %v", err) @@ -74,9 +74,8 @@ func main() { // Make another ClientConn with round_robin policy. roundrobinConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), - grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin"}`), // This sets the initial balancing policy. - grpc.WithInsecure(), - grpc.WithBlock(), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), // This sets the initial balancing policy. 
+ grpc.WithTransportCredentials(insecure.NewCredentials()), ) if err != nil { log.Fatalf("did not connect: %v", err) @@ -112,7 +111,7 @@ type exampleResolver struct { } func (r *exampleResolver) start() { - addrStrs := r.addrsStore[r.target.Endpoint] + addrStrs := r.addrsStore[r.target.Endpoint()] addrs := make([]resolver.Address, len(addrStrs)) for i, s := range addrStrs { addrs[i] = resolver.Address{Addr: s} diff --git a/examples/features/metadata/client/main.go b/examples/features/metadata/client/main.go index 715fb6f5acbd..97e7fd40cf45 100644 --- a/examples/features/metadata/client/main.go +++ b/examples/features/metadata/client/main.go @@ -28,6 +28,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/metadata" ) @@ -286,7 +287,7 @@ const message = "this is examples/metadata" func main() { flag.Parse() // Set up a connection to the server. - conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/metadata_interceptor/README.md b/examples/features/metadata_interceptor/README.md new file mode 100644 index 000000000000..93a6925d79ef --- /dev/null +++ b/examples/features/metadata_interceptor/README.md @@ -0,0 +1,70 @@ +# Metadata interceptor example + +This example shows how to update metadata from unary and streaming interceptors on the server. +Please see +[grpc-metadata.md](https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) +for more information. + +## Try it + +``` +go run server/main.go +``` + +``` +go run client/main.go +``` + +## Explanation + +#### Unary interceptor + +The interceptor can read existing metadata from the RPC context passed to it. 
+Since Go contexts are immutable, the interceptor will have to create a new context
+with updated metadata and pass it to the provided handler.
+
+```go
+func SomeInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ // Get the incoming metadata from the RPC context, and add a new
+ // key-value pair to it.
+ md, _ := metadata.FromIncomingContext(ctx)
+ md.Append("key1", "value1")
+
+ // Create a context with the new metadata and pass it to handler.
+ ctx = metadata.NewIncomingContext(ctx, md)
+ return handler(ctx, req)
+}
+```
+
+#### Streaming interceptor
+
+`grpc.ServerStream` does not provide a way to modify its RPC context. The streaming
+interceptor therefore needs to implement the `grpc.ServerStream` interface and return
+a context with updated metadata.
+
+The easiest way to do this would be to create a type which embeds the `grpc.ServerStream`
+interface and overrides only the `Context()` method to return a context with updated
+metadata. The streaming interceptor would then pass this wrapped stream to the provided handler.
+
+```go
+type wrappedStream struct {
+ grpc.ServerStream
+ ctx context.Context
+}
+
+func (s *wrappedStream) Context() context.Context {
+ return s.ctx
+}
+
+func SomeStreamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ // Get the incoming metadata from the RPC context, and add a new
+ // key-value pair to it.
+ md, _ := metadata.FromIncomingContext(ss.Context())
+ md.Append("key1", "value1")
+
+ // Create a context with the new metadata and pass it to handler.
+ ctx := metadata.NewIncomingContext(ss.Context(), md)
+
+ return handler(srv, &wrappedStream{ss, ctx})
+}
+```
\ No newline at end of file
diff --git a/examples/features/metadata_interceptor/client/main.go b/examples/features/metadata_interceptor/client/main.go
new file mode 100644
index 000000000000..5e1bebec12ae
--- /dev/null
+++ b/examples/features/metadata_interceptor/client/main.go
@@ -0,0 +1,86 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Binary client is an example client.
+package main + +import ( + "context" + "flag" + "fmt" + "io" + "log" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var addr = flag.String("addr", "localhost:50051", "the address to connect to") + +func callUnaryEcho(ctx context.Context, client pb.EchoClient) { + resp, err := client.UnaryEcho(ctx, &pb.EchoRequest{Message: "hello world"}) + if err != nil { + log.Fatalf("UnaryEcho: %v", err) + } + fmt.Println("UnaryEcho: ", resp.Message) +} + +func callBidiStreamingEcho(ctx context.Context, client pb.EchoClient) { + c, err := client.BidirectionalStreamingEcho(ctx) + if err != nil { + log.Fatalf("BidiStreamingEcho: %v", err) + } + + if err := c.Send(&pb.EchoRequest{Message: "hello world"}); err != nil { + log.Fatalf("Sending echo request: %v", err) + } + c.CloseSend() + + for { + resp, err := c.Recv() + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("Receiving echo response: %v", err) + } + fmt.Println("BidiStreaming Echo: ", resp.Message) + } +} + +func main() { + flag.Parse() + + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Fatalf("grpc.Dial(%q): %v", *addr, err) + } + defer conn.Close() + + ec := pb.NewEchoClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + callUnaryEcho(ctx, ec) + + callBidiStreamingEcho(ctx, ec) +} diff --git a/examples/features/metadata_interceptor/server/main.go b/examples/features/metadata_interceptor/server/main.go new file mode 100644 index 000000000000..8f0dc5bfe6d4 --- /dev/null +++ b/examples/features/metadata_interceptor/server/main.go @@ -0,0 +1,140 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server. +package main + +import ( + "context" + "flag" + "fmt" + "io" + "log" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var port = flag.Int("port", 50051, "the port to serve on") + +var errMissingMetadata = status.Errorf(codes.InvalidArgument, "no incoming metadata in rpc context") + +type server struct { + pb.UnimplementedEchoServer +} + +func unaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errMissingMetadata + } + + md.Append("key1", "value1") + ctx = metadata.NewIncomingContext(ctx, md) + + return handler(ctx, req) +} + +func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) { + fmt.Printf("--- UnaryEcho ---\n") + + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.Internal, "UnaryEcho: missing incoming metadata in rpc context") + } + + // Read and print metadata added by the interceptor. + if v, ok := md["key1"]; ok { + fmt.Printf("key1 from metadata: \n") + for i, e := range v { + fmt.Printf(" %d. 
%s\n", i, e) + } + } + + return &pb.EchoResponse{Message: in.Message}, nil +} + +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (s *wrappedStream) Context() context.Context { + return s.ctx +} + +func streamInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + md, ok := metadata.FromIncomingContext(ss.Context()) + if !ok { + return errMissingMetadata + } + + md.Append("key1", "value1") + ctx := metadata.NewIncomingContext(ss.Context(), md) + + return handler(srv, &wrappedStream{ss, ctx}) +} + +func (s *server) BidirectionalStreamingEcho(stream pb.Echo_BidirectionalStreamingEchoServer) error { + fmt.Printf("--- BidirectionalStreamingEcho ---\n") + + md, ok := metadata.FromIncomingContext(stream.Context()) + if !ok { + return status.Errorf(codes.Internal, "BidirectionalStreamingEcho: missing incoming metadata in rpc context") + } + + // Read and print metadata added by the interceptor. + if v, ok := md["key1"]; ok { + fmt.Printf("key1 from metadata: \n") + for i, e := range v { + fmt.Printf(" %d. %s\n", i, e) + } + } + + // Read requests and send responses. 
+ for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + if err = stream.Send(&pb.EchoResponse{Message: in.Message}); err != nil { + return err + } + } +} + +func main() { + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("net.Listen() failed: %v", err) + } + fmt.Printf("Server listening at %v\n", lis.Addr()) + + s := grpc.NewServer(grpc.UnaryInterceptor(unaryInterceptor), grpc.StreamInterceptor(streamInterceptor)) + pb.RegisterEchoServer(s, &server{}) + s.Serve(lis) +} diff --git a/examples/features/multiplex/client/main.go b/examples/features/multiplex/client/main.go index 72d6cd56775b..3cd85240a335 100644 --- a/examples/features/multiplex/client/main.go +++ b/examples/features/multiplex/client/main.go @@ -27,6 +27,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ecpb "google.golang.org/grpc/examples/features/proto/echo" hwpb "google.golang.org/grpc/examples/helloworld/helloworld" ) @@ -58,7 +59,7 @@ func callUnaryEcho(client ecpb.EchoClient, message string) { func main() { flag.Parse() // Set up a connection to the server. 
- conn, err := grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/name_resolving/client/main.go b/examples/features/name_resolving/client/main.go index 1c56dcce15df..2766611ba795 100644 --- a/examples/features/name_resolving/client/main.go +++ b/examples/features/name_resolving/client/main.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ecpb "google.golang.org/grpc/examples/features/proto/echo" "google.golang.org/grpc/resolver" ) @@ -57,8 +58,7 @@ func makeRPCs(cc *grpc.ClientConn, n int) { func main() { passthroughConn, err := grpc.Dial( fmt.Sprintf("passthrough:///%s", backendAddr), // Dial to "passthrough:///localhost:50051" - grpc.WithInsecure(), - grpc.WithBlock(), + grpc.WithTransportCredentials(insecure.NewCredentials()), ) if err != nil { log.Fatalf("did not connect: %v", err) @@ -72,8 +72,7 @@ func main() { exampleConn, err := grpc.Dial( fmt.Sprintf("%s:///%s", exampleScheme, exampleServiceName), // Dial to "example:///resolver.example.grpc.io" - grpc.WithInsecure(), - grpc.WithBlock(), + grpc.WithTransportCredentials(insecure.NewCredentials()), ) if err != nil { log.Fatalf("did not connect: %v", err) @@ -120,7 +119,7 @@ type exampleResolver struct { } func (r *exampleResolver) start() { - addrStrs := r.addrsStore[r.target.Endpoint] + addrStrs := r.addrsStore[r.target.Endpoint()] addrs := make([]resolver.Address, len(addrStrs)) for i, s := range addrStrs { addrs[i] = resolver.Address{Addr: s} diff --git a/examples/features/observability/README.md b/examples/features/observability/README.md new file mode 100644 index 000000000000..f2aa52101069 --- /dev/null +++ b/examples/features/observability/README.md @@ -0,0 +1,3 @@ +This example is the Hello World example instrumented for logs, metrics, and tracing. 
+ +Please refer to Microservices Observability user guide for setup. diff --git a/examples/features/observability/client/clientConfig.json b/examples/features/observability/client/clientConfig.json new file mode 100644 index 000000000000..b98ae25e1b77 --- /dev/null +++ b/examples/features/observability/client/clientConfig.json @@ -0,0 +1,17 @@ +{ + "cloud_monitoring": {}, + "cloud_trace": { + "sampling_rate": 1.0 + }, + "cloud_logging": { + "client_rpc_events": [{ + "methods": ["*"] + }], + "server_rpc_events": [{ + "methods": ["*"] + }] + }, + "labels": { + "environment" : "example-client" + } +} diff --git a/examples/features/observability/client/main.go b/examples/features/observability/client/main.go new file mode 100644 index 000000000000..4c1d994a30dd --- /dev/null +++ b/examples/features/observability/client/main.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package main implements a client for Greeter service. 
+package main
+
+import (
+ "context"
+ "flag"
+ "log"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+ pb "google.golang.org/grpc/examples/helloworld/helloworld"
+ "google.golang.org/grpc/gcp/observability"
+)
+
+const (
+ defaultName = "world"
+)
+
+var (
+ addr = flag.String("addr", "localhost:50051", "the address to connect to")
+ name = flag.String("name", defaultName, "Name to greet")
+)
+
+func main() {
+ // Turn on global telemetry for the whole binary. If a configuration is
+ // specified, any created gRPC Client Conn's or Servers will emit telemetry
+ // data according to the configuration.
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+ err := observability.Start(ctx)
+ if err != nil {
+ log.Fatalf("observability.Start() failed: %v", err)
+ }
+ defer observability.End()
+
+ flag.Parse()
+ // Set up a connection to the server.
+ conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ if err != nil {
+ log.Fatalf("did not connect: %v", err)
+ }
+ defer conn.Close()
+ c := pb.NewGreeterClient(conn)
+
+ // Contact the server and print out its response.
+ r, err := c.SayHello(ctx, &pb.HelloRequest{Name: *name}) + if err != nil { + log.Fatalf("could not greet: %v", err) + } + log.Printf("Greeting: %s", r.GetMessage()) +} diff --git a/examples/features/observability/go.mod b/examples/features/observability/go.mod new file mode 100644 index 000000000000..a7b3e2f8dc38 --- /dev/null +++ b/examples/features/observability/go.mod @@ -0,0 +1,43 @@ +module google.golang.org/grpc/examples/features/observability + +go 1.17 + +require ( + google.golang.org/grpc v1.54.0 + google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc + google.golang.org/grpc/gcp/observability v1.0.0 +) + +require ( + cloud.google.com/go v0.110.0 // indirect + cloud.google.com/go/compute v1.19.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/logging v1.7.0 // indirect + cloud.google.com/go/longrunning v0.4.1 // indirect + cloud.google.com/go/monitoring v1.13.0 // indirect + cloud.google.com/go/trace v1.9.0 // indirect + contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect + github.com/aws/aws-sdk-go v1.44.162 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/prometheus/prometheus v2.5.0+incompatible // indirect + go.opencensus.io v0.24.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/oauth2 v0.6.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.6.0 // indirect + 
golang.org/x/text v0.8.0 // indirect + google.golang.org/api v0.114.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/grpc/stats/opencensus v1.0.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect +) diff --git a/examples/features/observability/go.sum b/examples/features/observability/go.sum new file mode 100644 index 000000000000..d28df85cd149 --- /dev/null +++ b/examples/features/observability/go.sum @@ -0,0 +1,644 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= 
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= 
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= +cloud.google.com/go/monitoring v1.13.0 h1:2qsrgXGVoRXpP7otZ14eE1I568zAa92sJSDPyOJvwjM= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= +cloud.google.com/go/trace v1.9.0 h1:olxC0QHC59zgJVALtgqfD9tGk0lfeCP5/AGXL3Px/no= 
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= +github.com/aws/aws-sdk-go v1.44.162/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock 
v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod 
h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
+github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= +github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp 
v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools 
v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api 
v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc 
v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc h1:H58v4RmBwciuKpwU6NFUn3w2hPZNL78HedaJUitCdpI= +google.golang.org/grpc/examples v0.0.0-20230323213306-0fdfd40215dc/go.mod h1:EXfxRt8PpWkTFBAXaWXB0Xgb1S/FFBXvFRry0nr2bHQ= +google.golang.org/grpc/gcp/observability v1.0.0 h1:YkGqlAFEFM69+GDI8MnuSV4RTvBWkx4AKealZ+yGizY= +google.golang.org/grpc/gcp/observability v1.0.0/go.mod h1:SmWxljYyQOJWPALwV6WhM3PdbH7sQsrCYIzlRy2PY00= +google.golang.org/grpc/stats/opencensus v1.0.0 h1:evSYcRZaSToQp+borzWE52+03joezZeXcKJvZDfkUJA= +google.golang.org/grpc/stats/opencensus v1.0.0/go.mod h1:FhdkeYvN43wLYUnapVuRJJ9JXkNwe403iLUW2LKSnjs= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod 
h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/examples/features/observability/server/main.go b/examples/features/observability/server/main.go new file mode 100644 index 000000000000..0aae0699342d --- /dev/null +++ b/examples/features/observability/server/main.go @@ -0,0 +1,89 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package main implements a server for Greeter service. +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + "os" + "os/signal" + "syscall" + "time" + + "google.golang.org/grpc" + pb "google.golang.org/grpc/examples/helloworld/helloworld" + "google.golang.org/grpc/gcp/observability" +) + +var ( + port = flag.Int("port", 50051, "The server port") +) + +// server is used to implement helloworld.GreeterServer. +type server struct { + pb.UnimplementedGreeterServer +} + +// SayHello implements helloworld.GreeterServer +func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + log.Printf("Received: %v", in.GetName()) + return &pb.HelloReply{Message: "Hello " + in.GetName()}, nil +} + +func main() { + // Turn on global telemetry for the whole binary. 
If a configuration is + // specified, any created gRPC Client Conn's or Servers will emit telemetry + // data according to the configuration. + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + err := observability.Start(ctx) + if err != nil { + log.Fatalf("observability.Start() failed: %v", err) + } + defer observability.End() + + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + s := grpc.NewServer() + pb.RegisterGreeterServer(s, &server{}) + log.Printf("server listening at %v", lis.Addr()) + + // This server can potentially be terminated by an external signal from the + // Operating System. The following catches those signals and calls s.Stop(). + // This causes the s.Serve() call to return and run main()'s defers, + // including the observability.End() call that ensures any pending + // observability data is sent to Cloud Operations. + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + s.Stop() + }() + + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/examples/features/observability/server/serverConfig.json b/examples/features/observability/server/serverConfig.json new file mode 100644 index 000000000000..a2bf7f6b6e74 --- /dev/null +++ b/examples/features/observability/server/serverConfig.json @@ -0,0 +1,17 @@ +{ + "cloud_monitoring": {}, + "cloud_trace": { + "sampling_rate": 1.0 + }, + "cloud_logging": { + "client_rpc_events": [{ + "methods": ["*"] + }], + "server_rpc_events": [{ + "methods": ["*"] + }] + }, + "labels": { + "environment" : "example-server" + } +} diff --git a/examples/features/orca/README.md b/examples/features/orca/README.md new file mode 100644 index 000000000000..ef99aa255ba5 --- /dev/null +++ b/examples/features/orca/README.md @@ -0,0 +1,48 @@ +# ORCA Load Reporting + +ORCA is a protocol for reporting load between 
servers and clients. This +example shows how to implement this from both the client and server side. For +more details, please see [gRFC +A51](https://github.com/grpc/proposal/blob/master/A51-custom-backend-metrics.md). + +## Try it + +``` +go run server/main.go +``` + +``` +go run client/main.go +``` + +## Explanation + +gRPC ORCA support provides two different ways to report load data to clients +from servers: out-of-band and per-RPC. Out-of-band metrics are reported +regularly at some interval on a stream, while per-RPC metrics are reported +along with the trailers at the end of a call. Both of these mechanisms are +optional and work independently. + +The full ORCA API documentation is available here: +https://pkg.go.dev/google.golang.org/grpc/orca + +### Out-of-band Metrics + +The server registers an ORCA service that is used for out-of-band metrics. It +does this by using `orca.Register()` and then setting metrics on the returned +`orca.Service` using its methods. + +The client receives out-of-band metrics via the LB policy. It receives +callbacks to a listener by registering the listener on a `SubConn` via +`orca.RegisterOOBListener`. + +### Per-RPC Metrics + +The server is set up to report query cost metrics in its RPC handler. For +per-RPC metrics to be reported, the gRPC server must be created with the +`orca.CallMetricsServerOption()` option, and metrics are set by calling methods +on the returned `orca.CallMetricsRecorder` from +`orca.CallMetricsRecorderFromContext()`. + +The client performs one RPC per second. Per-RPC metrics are available for each +call via the `Done()` callback returned from the LB policy's picker. diff --git a/examples/features/orca/client/main.go b/examples/features/orca/client/main.go new file mode 100644 index 000000000000..f295cfd3866a --- /dev/null +++ b/examples/features/orca/client/main.go @@ -0,0 +1,153 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client. +package main + +import ( + "context" + "flag" + "fmt" + "log" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/orca" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var addr = flag.String("addr", "localhost:50051", "the address to connect to") +var test = flag.Bool("test", false, "if set, only 1 RPC is performed before exiting") + +func main() { + flag.Parse() + + // Set up a connection to the server. Configure to use our custom LB + // policy which will receive all the ORCA load reports. + conn, err := grpc.Dial(*addr, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"orca_example":{}}]}`), + ) + if err != nil { + log.Fatalf("did not connect: %v", err) + } + defer conn.Close() + + c := pb.NewEchoClient(conn) + + // Perform RPCs once per second. + ticker := time.NewTicker(time.Second) + for range ticker.C { + func() { + // Use an anonymous function to ensure context cancelation via defer. 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + if _, err := c.UnaryEcho(ctx, &pb.EchoRequest{Message: "test echo message"}); err != nil { + log.Fatalf("Error from UnaryEcho call: %v", err) + } + }() + if *test { + return + } + } + +} + +// Register an ORCA load balancing policy to receive per-call metrics and +// out-of-band metrics. +func init() { + balancer.Register(orcaLBBuilder{}) +} + +type orcaLBBuilder struct{} + +func (orcaLBBuilder) Name() string { return "orca_example" } +func (orcaLBBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &orcaLB{cc: cc} +} + +// orcaLB is an incomplete LB policy designed to show basic ORCA load reporting +// functionality. It collects per-call metrics in the `Done` callback returned +// by its picker, and it collects out-of-band metrics by registering a listener +// when its SubConn is created. It does not follow general LB policy best +// practices and makes assumptions about the simple test environment it is +// designed to run within. +type orcaLB struct { + cc balancer.ClientConn +} + +func (o *orcaLB) UpdateClientConnState(ccs balancer.ClientConnState) error { + // We assume only one update, ever, containing exactly one address, given + // the use of the "passthrough" (default) name resolver. + + addrs := ccs.ResolverState.Addresses + if len(addrs) != 1 { + return fmt.Errorf("orcaLB: expected 1 address; received: %v", addrs) + } + + // Create one SubConn for the address and connect it. + sc, err := o.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + return fmt.Errorf("orcaLB: error creating SubConn: %v", err) + } + sc.Connect() + + // Register a simple ORCA OOB listener on the SubConn. We request a 1 + // second report interval, but in this example the server indicated the + // minimum interval it will allow is 3 seconds, so reports will only be + // sent that often. 
+ orca.RegisterOOBListener(sc, orcaLis{}, orca.OOBListenerOptions{ReportInterval: time.Second}) + + return nil +} + +func (o *orcaLB) ResolverError(error) {} + +func (o *orcaLB) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + if scs.ConnectivityState == connectivity.Ready { + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: &picker{sc}}) + } +} + +func (o *orcaLB) Close() {} + +type picker struct { + sc balancer.SubConn +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{ + SubConn: p.sc, + Done: func(di balancer.DoneInfo) { + fmt.Println("Per-call load report received:", di.ServerLoad.(*v3orcapb.OrcaLoadReport).GetRequestCost()) + }, + }, nil +} + +// orcaLis is the out-of-band load report listener that we pass to +// orca.RegisterOOBListener to receive periodic load report information. +type orcaLis struct{} + +func (orcaLis) OnLoadReport(lr *v3orcapb.OrcaLoadReport) { + fmt.Println("Out-of-band load report received:", lr) +} diff --git a/examples/features/orca/server/main.go b/examples/features/orca/server/main.go new file mode 100644 index 000000000000..e52d5d06eebf --- /dev/null +++ b/examples/features/orca/server/main.go @@ -0,0 +1,96 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server. 
+package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/status" + + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var port = flag.Int("port", 50051, "the port to serve on") + +type server struct { + pb.UnimplementedEchoServer +} + +func (s *server) UnaryEcho(ctx context.Context, in *pb.EchoRequest) (*pb.EchoResponse, error) { + // Report a sample cost for this query. + cmr := orca.CallMetricsRecorderFromContext(ctx) + if cmr == nil { + return nil, status.Errorf(codes.Internal, "unable to retrieve call metrics recorder (missing ORCA ServerOption?)") + } + cmr.SetRequestCost("db_queries", 10) + + return &pb.EchoResponse{Message: in.Message}, nil +} + +func main() { + flag.Parse() + + lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) + if err != nil { + log.Fatalf("Failed to listen: %v", err) + } + fmt.Printf("Server listening at %v\n", lis.Addr()) + + // Create the gRPC server with the orca.CallMetricsServerOption() option, + // which will enable per-call metric recording. No ServerMetricsProvider + // is given here because the out-of-band reporting is enabled separately. + s := grpc.NewServer(orca.CallMetricsServerOption(nil)) + pb.RegisterEchoServer(s, &server{}) + + // Register the orca service for out-of-band metric reporting, and set the + // minimum reporting interval to 3 seconds. Note that, by default, the + // minimum interval must be at least 30 seconds, but 3 seconds is set via + // an internal-only option for illustration purposes only. 
+ smr := orca.NewServerMetricsRecorder() + opts := orca.ServiceOptions{ + MinReportingInterval: 3 * time.Second, + ServerMetricsProvider: smr, + } + internal.ORCAAllowAnyMinReportingInterval.(func(so *orca.ServiceOptions))(&opts) + if err := orca.Register(s, opts); err != nil { + log.Fatalf("Failed to register ORCA service: %v", err) + } + + // Simulate CPU utilization reporting. + go func() { + for { + smr.SetCPUUtilization(.5) + time.Sleep(2 * time.Second) + smr.SetCPUUtilization(.9) + time.Sleep(2 * time.Second) + } + }() + + s.Serve(lis) +} diff --git a/examples/features/proto/echo/echo.pb.go b/examples/features/proto/echo/echo.pb.go index 5af638d5280e..c46af9a08bc8 100644 --- a/examples/features/proto/echo/echo.pb.go +++ b/examples/features/proto/echo/echo.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: examples/features/proto/echo/echo.proto package echo import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // EchoRequest is the request for echo. type EchoRequest struct { state protoimpl.MessageState diff --git a/examples/features/proto/echo/echo_grpc.pb.go b/examples/features/proto/echo/echo_grpc.pb.go index 052087dae369..7efd51403fb9 100644 --- a/examples/features/proto/echo/echo_grpc.pb.go +++ b/examples/features/proto/echo/echo_grpc.pb.go @@ -1,4 +1,25 @@ +// +// +// Copyright 2018 gRPC authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: examples/features/proto/echo/echo.proto package echo @@ -14,6 +35,13 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Echo_UnaryEcho_FullMethodName = "/grpc.examples.echo.Echo/UnaryEcho" + Echo_ServerStreamingEcho_FullMethodName = "/grpc.examples.echo.Echo/ServerStreamingEcho" + Echo_ClientStreamingEcho_FullMethodName = "/grpc.examples.echo.Echo/ClientStreamingEcho" + Echo_BidirectionalStreamingEcho_FullMethodName = "/grpc.examples.echo.Echo/BidirectionalStreamingEcho" +) + // EchoClient is the client API for Echo service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -38,7 +66,7 @@ func NewEchoClient(cc grpc.ClientConnInterface) EchoClient { func (c *echoClient) UnaryEcho(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (*EchoResponse, error) { out := new(EchoResponse) - err := c.cc.Invoke(ctx, "/grpc.examples.echo.Echo/UnaryEcho", in, out, opts...) + err := c.cc.Invoke(ctx, Echo_UnaryEcho_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -46,7 +74,7 @@ func (c *echoClient) UnaryEcho(ctx context.Context, in *EchoRequest, opts ...grp } func (c *echoClient) ServerStreamingEcho(ctx context.Context, in *EchoRequest, opts ...grpc.CallOption) (Echo_ServerStreamingEchoClient, error) { - stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[0], "/grpc.examples.echo.Echo/ServerStreamingEcho", opts...) + stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[0], Echo_ServerStreamingEcho_FullMethodName, opts...) if err != nil { return nil, err } @@ -78,7 +106,7 @@ func (x *echoServerStreamingEchoClient) Recv() (*EchoResponse, error) { } func (c *echoClient) ClientStreamingEcho(ctx context.Context, opts ...grpc.CallOption) (Echo_ClientStreamingEchoClient, error) { - stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[1], "/grpc.examples.echo.Echo/ClientStreamingEcho", opts...) + stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[1], Echo_ClientStreamingEcho_FullMethodName, opts...) if err != nil { return nil, err } @@ -112,7 +140,7 @@ func (x *echoClientStreamingEchoClient) CloseAndRecv() (*EchoResponse, error) { } func (c *echoClient) BidirectionalStreamingEcho(ctx context.Context, opts ...grpc.CallOption) (Echo_BidirectionalStreamingEchoClient, error) { - stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[2], "/grpc.examples.echo.Echo/BidirectionalStreamingEcho", opts...) + stream, err := c.cc.NewStream(ctx, &Echo_ServiceDesc.Streams[2], Echo_BidirectionalStreamingEcho_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -196,7 +224,7 @@ func _Echo_UnaryEcho_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.examples.echo.Echo/UnaryEcho", + FullMethod: Echo_UnaryEcho_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(EchoServer).UnaryEcho(ctx, req.(*EchoRequest)) diff --git a/examples/features/retry/README.md b/examples/features/retry/README.md index f56d438adc2b..e39a1c71704d 100644 --- a/examples/features/retry/README.md +++ b/examples/features/retry/README.md @@ -18,11 +18,10 @@ First start the server: go run server/main.go ``` -Then run the client. Note that when running the client, `GRPC_GO_RETRY=on` must be set in -your environment: +Then run the client: ```bash -GRPC_GO_RETRY=on go run client/main.go +go run client/main.go ``` ## Usage @@ -62,5 +61,5 @@ To use the above service config, pass it with `grpc.WithDefaultServiceConfig` to `grpc.Dial`. 
```go -conn, err := grpc.Dial(ctx,grpc.WithInsecure(), grpc.WithDefaultServiceConfig(retryPolicy)) +conn, err := grpc.Dial(ctx,grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(retryPolicy)) ``` diff --git a/examples/features/retry/client/main.go b/examples/features/retry/client/main.go index 73147cfe0a27..3b9b80e24ba7 100644 --- a/examples/features/retry/client/main.go +++ b/examples/features/retry/client/main.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/features/proto/echo" ) @@ -48,7 +49,7 @@ var ( // use grpc.WithDefaultServiceConfig() to set service config func retryDial() (*grpc.ClientConn, error) { - return grpc.Dial(*addr, grpc.WithInsecure(), grpc.WithDefaultServiceConfig(retryPolicy)) + return grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(retryPolicy)) } func main() { diff --git a/examples/features/stats_monitoring/README.md b/examples/features/stats_monitoring/README.md new file mode 100644 index 000000000000..079b6b4f1ee6 --- /dev/null +++ b/examples/features/stats_monitoring/README.md @@ -0,0 +1,58 @@ +# Stats Monitoring Handler + +This example demonstrates the use of the [`stats`](https://pkg.go.dev/google.golang.org/grpc/stats) package for reporting various +network and RPC stats. +_Note that all fields are READ-ONLY and the APIs of the `stats` package are +experimental_. + +## Try it + +``` +go run server/main.go +``` + +``` +go run client/main.go +``` + +## Explanation + +gRPC provides a mechanism to hook on to various events (phases) of the +request-response network cycle through the [`stats.Handler`](https://pkg.go.dev/google.golang.org/grpc/stats#Handler) interface. To access +these events, a concrete type that implements `stats.Handler` should be passed to +`grpc.WithStatsHandler()` on the client side and `grpc.StatsHandler()` on the +server side. 
+ +The `HandleRPC(context.Context, RPCStats)` method on `stats.Handler` is called +multiple times during a request-response cycle, and various event stats are +passed to its `RPCStats` parameter (an interface). The concrete types that +implement this interface are: `*stats.Begin`, `*stats.InHeader`, `*stats.InPayload`, +`*stats.InTrailer`, `*stats.OutHeader`, `*stats.OutPayload`, `*stats.OutTrailer`, and +`*stats.End`. The order of these events differs on client and server. + +Similarly, the `HandleConn(context.Context, ConnStats)` method on `stats.Handler` +is called twice, once at the beginning of the connection with `*stats.ConnBegin` +and once at the end with `*stats.ConnEnd`. + +The [`stats.Handler`](https://pkg.go.dev/google.golang.org/grpc/stats#Handler) interface also provides +`TagRPC(context.Context, *RPCTagInfo) context.Context` and +`TagConn(context.Context, *ConnTagInfo) context.Context` methods. These methods +are mainly used to attach network related information to the given context. + +The `TagRPC(context.Context, *RPCTagInfo) context.Context` method returns a +context from which the context used for the rest lifetime of the RPC will be +derived. This behavior is consistent between the gRPC client and server. + +The context returned from +`TagConn(context.Context, *ConnTagInfo) context.Context` has varied lifespan: + +- In the gRPC client: + The context used for the rest lifetime of the RPC will NOT be derived from + this context. Hence the information attached to this context can only be + consumed by `HandleConn(context.Context, ConnStats)` method. +- In the gRPC server: + The context used for the rest lifetime of the RPC will be derived from + this context. + +NOTE: The [stats](https://pkg.go.dev/google.golang.org/grpc/stats) package should only be used for network monitoring purposes, +and not as an alternative to [interceptors](https://github.com/grpc/grpc-go/blob/master/examples/features/metadata). 
diff --git a/examples/features/stats_monitoring/client/main.go b/examples/features/stats_monitoring/client/main.go new file mode 100644 index 000000000000..0fb820d11c63 --- /dev/null +++ b/examples/features/stats_monitoring/client/main.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client to illustrate the use of the stats handler. +package main + +import ( + "context" + "flag" + "log" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + echogrpc "google.golang.org/grpc/examples/features/proto/echo" + echopb "google.golang.org/grpc/examples/features/proto/echo" + "google.golang.org/grpc/examples/features/stats_monitoring/statshandler" +) + +var addr = flag.String("addr", "localhost:50051", "the address to connect to") + +func main() { + flag.Parse() + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithStatsHandler(statshandler.New()), + } + conn, err := grpc.Dial(*addr, opts...) 
+ if err != nil { + log.Fatalf("failed to connect to server %q: %v", *addr, err) + } + defer conn.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + c := echogrpc.NewEchoClient(conn) + + resp, err := c.UnaryEcho(ctx, &echopb.EchoRequest{Message: "stats handler demo"}) + if err != nil { + log.Fatalf("unexpected error from UnaryEcho: %v", err) + } + log.Printf("RPC response: %s", resp.Message) +} diff --git a/examples/features/stats_monitoring/server/main.go b/examples/features/stats_monitoring/server/main.go new file mode 100644 index 000000000000..a460522c29db --- /dev/null +++ b/examples/features/stats_monitoring/server/main.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server to illustrate the use of the stats handler. 
+package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + "time" + + "google.golang.org/grpc" + + echogrpc "google.golang.org/grpc/examples/features/proto/echo" + echopb "google.golang.org/grpc/examples/features/proto/echo" + "google.golang.org/grpc/examples/features/stats_monitoring/statshandler" +) + +var port = flag.Int("port", 50051, "the port to serve on") + +type server struct { + echogrpc.UnimplementedEchoServer +} + +func (s *server) UnaryEcho(ctx context.Context, req *echopb.EchoRequest) (*echopb.EchoResponse, error) { + time.Sleep(2 * time.Second) + return &echopb.EchoResponse{Message: req.Message}, nil +} + +func main() { + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("failed to listen on port %d: %v", *port, err) + } + log.Printf("server listening at %v\n", lis.Addr()) + + s := grpc.NewServer(grpc.StatsHandler(statshandler.New())) + echogrpc.RegisterEchoServer(s, &server{}) + + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/examples/features/stats_monitoring/statshandler/handler.go b/examples/features/stats_monitoring/statshandler/handler.go new file mode 100644 index 000000000000..85688b8c3856 --- /dev/null +++ b/examples/features/stats_monitoring/statshandler/handler.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package statshandler is an example pkg to illustrate the use of the stats handler. +package statshandler + +import ( + "context" + "log" + "net" + "path/filepath" + + "google.golang.org/grpc/stats" +) + +// Handler implements [stats.Handler](https://pkg.go.dev/google.golang.org/grpc/stats#Handler) interface. +type Handler struct{} + +type connStatCtxKey struct{} + +// TagConn can attach some information to the given context. +// The context used in HandleConn for this connection will be derived from the context returned. +// In the gRPC client: +// The context used in HandleRPC for RPCs on this connection will be the user's context and NOT derived from the context returned here. +// In the gRPC server: +// The context used in HandleRPC for RPCs on this connection will be derived from the context returned here. +func (st *Handler) TagConn(ctx context.Context, stat *stats.ConnTagInfo) context.Context { + log.Printf("[TagConn] [%T]: %+[1]v", stat) + return context.WithValue(ctx, connStatCtxKey{}, stat) +} + +// HandleConn processes the Conn stats. +func (st *Handler) HandleConn(ctx context.Context, stat stats.ConnStats) { + var rAddr net.Addr + if s, ok := ctx.Value(connStatCtxKey{}).(*stats.ConnTagInfo); ok { + rAddr = s.RemoteAddr + } + + if stat.IsClient() { + log.Printf("[server addr: %s] [HandleConn] [%T]: %+[2]v", rAddr, stat) + } else { + log.Printf("[client addr: %s] [HandleConn] [%T]: %+[2]v", rAddr, stat) + } +} + +type rpcStatCtxKey struct{} + +// TagRPC can attach some information to the given context. +// The context used for the rest lifetime of the RPC will be derived from the returned context. +func (st *Handler) TagRPC(ctx context.Context, stat *stats.RPCTagInfo) context.Context { + log.Printf("[TagRPC] [%T]: %+[1]v", stat) + return context.WithValue(ctx, rpcStatCtxKey{}, stat) +} + +// HandleRPC processes the RPC stats. Note: All stat fields are read-only. 
+func (st *Handler) HandleRPC(ctx context.Context, stat stats.RPCStats) { + var sMethod string + if s, ok := ctx.Value(rpcStatCtxKey{}).(*stats.RPCTagInfo); ok { + sMethod = filepath.Base(s.FullMethodName) + } + + var cAddr net.Addr + // for gRPC clients, key connStatCtxKey{} will not be present in HandleRPC's context. + if s, ok := ctx.Value(connStatCtxKey{}).(*stats.ConnTagInfo); ok { + cAddr = s.RemoteAddr + } + + if stat.IsClient() { + log.Printf("[server method: %s] [HandleRPC] [%T]: %+[2]v", sMethod, stat) + } else { + log.Printf("[client addr: %s] [HandleRPC] [%T]: %+[2]v", cAddr, stat) + } +} + +// New returns a new implementation of [stats.Handler](https://pkg.go.dev/google.golang.org/grpc/stats#Handler) interface. +func New() *Handler { + return &Handler{} +} diff --git a/examples/features/unix_abstract/README.md b/examples/features/unix_abstract/README.md new file mode 100644 index 000000000000..32b3bd5f262c --- /dev/null +++ b/examples/features/unix_abstract/README.md @@ -0,0 +1,29 @@ +# Unix abstract sockets + +This example shows how to start a gRPC server listening on a unix abstract +socket and how to get a gRPC client to connect to it. + +## What is a unix abstract socket + +An abstract socket address is distinguished from a regular unix socket by the +fact that the first byte of the address is a null byte ('\0'). The address has +no connection with filesystem path names. + +## Try it + +``` +go run server/main.go +``` + +``` +go run client/main.go +``` + +## Explanation + +The gRPC server in this example listens on an address starting with a null byte +and the network is `unix`. The client uses the `unix-abstract` scheme with the +endpoint set to the abstract unix socket address without the null byte. The +`unix` resolver takes care of adding the null byte on the client. See +https://github.com/grpc/grpc/blob/master/doc/naming.md for more details.
+ diff --git a/examples/features/unix_abstract/client/main.go b/examples/features/unix_abstract/client/main.go new file mode 100644 index 000000000000..3564e7e82fee --- /dev/null +++ b/examples/features/unix_abstract/client/main.go @@ -0,0 +1,75 @@ +//go:build linux +// +build linux + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an example client which dials a server on an abstract unix +// socket. +package main + +import ( + "context" + "flag" + "fmt" + "log" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + ecpb "google.golang.org/grpc/examples/features/proto/echo" +) + +var ( + // A dial target of `unix:@abstract-unix-socket` should also work fine for + // this example because of golang conventions (net.Dial behavior). But we do + // not recommend this since we explicitly added the `unix-abstract` scheme + // for cross-language compatibility. 
+ addr = flag.String("addr", "abstract-unix-socket", "The unix abstract socket address") +) + +func callUnaryEcho(c ecpb.EchoClient, message string) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + r, err := c.UnaryEcho(ctx, &ecpb.EchoRequest{Message: message}) + if err != nil { + log.Fatalf("could not greet: %v", err) + } + fmt.Println(r.Message) +} + +func makeRPCs(cc *grpc.ClientConn, n int) { + hwc := ecpb.NewEchoClient(cc) + for i := 0; i < n; i++ { + callUnaryEcho(hwc, "this is examples/unix_abstract") + } +} + +func main() { + flag.Parse() + sockAddr := fmt.Sprintf("unix-abstract:%v", *addr) + cc, err := grpc.Dial(sockAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Fatalf("grpc.Dial(%q) failed: %v", sockAddr, err) + } + defer cc.Close() + + fmt.Printf("--- calling echo.Echo/UnaryEcho to %s\n", sockAddr) + makeRPCs(cc, 10) + fmt.Println() +} diff --git a/examples/features/unix_abstract/server/main.go b/examples/features/unix_abstract/server/main.go new file mode 100644 index 000000000000..7013466b4917 --- /dev/null +++ b/examples/features/unix_abstract/server/main.go @@ -0,0 +1,65 @@ +//go:build linux +// +build linux + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary server is an example server listening for gRPC connections on an +// abstract unix socket. 
+package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + + "google.golang.org/grpc" + + pb "google.golang.org/grpc/examples/features/proto/echo" +) + +var ( + addr = flag.String("addr", "abstract-unix-socket", "The unix abstract socket address") +) + +type ecServer struct { + pb.UnimplementedEchoServer + addr string +} + +func (s *ecServer) UnaryEcho(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) { + return &pb.EchoResponse{Message: fmt.Sprintf("%s (from %s)", req.Message, s.addr)}, nil +} + +func main() { + flag.Parse() + netw := "unix" + socketAddr := fmt.Sprintf("@%v", *addr) + lis, err := net.Listen(netw, socketAddr) + if err != nil { + log.Fatalf("net.Listen(%q, %q) failed: %v", netw, socketAddr, err) + } + s := grpc.NewServer() + pb.RegisterEchoServer(s, &ecServer{addr: socketAddr}) + log.Printf("serving on %s\n", lis.Addr().String()) + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/examples/features/wait_for_ready/main.go b/examples/features/wait_for_ready/main.go index f865410f1aa2..96524a81da32 100644 --- a/examples/features/wait_for_ready/main.go +++ b/examples/features/wait_for_ready/main.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" pb "google.golang.org/grpc/examples/features/proto/echo" @@ -58,7 +59,7 @@ func serve() { } func main() { - conn, err := grpc.Dial("localhost:50053", grpc.WithInsecure()) + conn, err := grpc.Dial("localhost:50053", grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } diff --git a/examples/features/xds/client/main.go b/examples/features/xds/client/main.go index b1daa1cae9c8..97918faa2245 100644 --- a/examples/features/xds/client/main.go +++ b/examples/features/xds/client/main.go @@ -16,78 +16,56 @@ * */ -// Package main implements a client for Greeter service. 
+// Binary main implements a client for Greeter service using gRPC's client-side +// support for xDS APIs. package main import ( "context" "flag" - "fmt" "log" + "strings" "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" pb "google.golang.org/grpc/examples/helloworld/helloworld" _ "google.golang.org/grpc/xds" // To install the xds resolvers and balancers. ) -const ( - defaultTarget = "localhost:50051" - defaultName = "world" +var ( + target = flag.String("target", "xds:///localhost:50051", "uri of the Greeter Server, e.g. 'xds:///helloworld-service:8080'") + name = flag.String("name", "world", "name you wished to be greeted by the server") + xdsCreds = flag.Bool("xds_creds", false, "whether the server should use xDS APIs to receive security configuration") ) -var help = flag.Bool("help", false, "Print usage information") - -func init() { - flag.Usage = func() { - fmt.Fprintf(flag.CommandLine.Output(), ` -Usage: client [name [target]] - - name - The name you wish to be greeted by. Defaults to %q - target - The URI of the server, e.g. "xds:///helloworld-service". Defaults to %q -`, defaultName, defaultTarget) - - flag.PrintDefaults() - } -} - func main() { flag.Parse() - if *help { - flag.Usage() - return - } - args := flag.Args() - - if len(args) > 2 { - flag.Usage() - return - } - name := defaultName - if len(args) > 0 { - name = args[0] + if !strings.HasPrefix(*target, "xds:///") { + log.Fatalf("-target must use a URI with scheme set to 'xds'") } - target := defaultTarget - if len(args) > 1 { - target = args[1] + creds := insecure.NewCredentials() + if *xdsCreds { + log.Println("Using xDS credentials...") + var err error + if creds, err = xdscreds.NewClientCredentials(xdscreds.ClientOptions{FallbackCreds: insecure.NewCredentials()}); err != nil { + log.Fatalf("failed to create client-side xDS credentials: %v", err) + } } - - // Set up a connection to the server. 
- conn, err := grpc.Dial(target, grpc.WithInsecure()) + conn, err := grpc.Dial(*target, grpc.WithTransportCredentials(creds)) if err != nil { - log.Fatalf("did not connect: %v", err) + log.Fatalf("grpc.Dial(%s) failed: %v", *target, err) } defer conn.Close() - c := pb.NewGreeterClient(conn) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - r, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) + c := pb.NewGreeterClient(conn) + r, err := c.SayHello(ctx, &pb.HelloRequest{Name: *name}) if err != nil { log.Fatalf("could not greet: %v", err) } diff --git a/examples/features/xds/server/main.go b/examples/features/xds/server/main.go index 7e0815645e5a..a7889dbdf3c3 100644 --- a/examples/features/xds/server/main.go +++ b/examples/features/xds/server/main.go @@ -16,7 +16,8 @@ * */ -// Package main starts Greeter service that will response with the hostname. +// Binary server demonstrated gRPC's support for xDS APIs on the server-side. It +// exposes the Greeter service that will response with the hostname. package main import ( @@ -27,36 +28,30 @@ import ( "math/rand" "net" "os" - "strconv" "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" pb "google.golang.org/grpc/examples/helloworld/helloworld" "google.golang.org/grpc/health" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/reflection" + "google.golang.org/grpc/xds" ) -var help = flag.Bool("help", false, "Print usage information") - -const ( - defaultPort = 50051 +var ( + port = flag.Int("port", 50051, "the port to serve Greeter service requests on. Health service will be served on `port+1`") + xdsCreds = flag.Bool("xds_creds", false, "whether the server should use xDS APIs to receive security configuration") ) -// server is used to implement helloworld.GreeterServer. 
+// server implements helloworld.GreeterServer interface. type server struct { pb.UnimplementedGreeterServer - serverName string } -func newServer(serverName string) *server { - return &server{ - serverName: serverName, - } -} - -// SayHello implements helloworld.GreeterServer +// SayHello implements helloworld.GreeterServer interface. func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { log.Printf("Received: %v", in.GetName()) return &pb.HelloReply{Message: "Hello " + in.GetName() + ", from " + s.serverName}, nil @@ -72,65 +67,40 @@ func determineHostname() string { return hostname } -func init() { - flag.Usage = func() { - fmt.Fprintf(flag.CommandLine.Output(), ` -Usage: server [port [hostname]] - - port - The listen port. Defaults to %d - hostname - The name clients will see in greet responses. Defaults to the machine's hostname -`, defaultPort) - - flag.PrintDefaults() - } -} - func main() { flag.Parse() - if *help { - flag.Usage() - return - } - args := flag.Args() - if len(args) > 2 { - flag.Usage() - return + greeterPort := fmt.Sprintf(":%d", *port) + greeterLis, err := net.Listen("tcp4", greeterPort) + if err != nil { + log.Fatalf("net.Listen(tcp4, %q) failed: %v", greeterPort, err) } - port := defaultPort - if len(args) > 0 { + creds := insecure.NewCredentials() + if *xdsCreds { + log.Println("Using xDS credentials...") var err error - port, err = strconv.Atoi(args[0]) - if err != nil { - log.Printf("Invalid port number: %v", err) - flag.Usage() - return + if creds, err = xdscreds.NewServerCredentials(xdscreds.ServerOptions{FallbackCreds: insecure.NewCredentials()}); err != nil { + log.Fatalf("failed to create server-side xDS credentials: %v", err) } } - var hostname string - if len(args) > 1 { - hostname = args[1] - } - if hostname == "" { - hostname = determineHostname() - } + greeterServer := xds.NewGRPCServer(grpc.Creds(creds)) + pb.RegisterGreeterServer(greeterServer, &server{serverName: determineHostname()}) - 
lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) + healthPort := fmt.Sprintf(":%d", *port+1) + healthLis, err := net.Listen("tcp4", healthPort) if err != nil { - log.Fatalf("failed to listen: %v", err) + log.Fatalf("net.Listen(tcp4, %q) failed: %v", healthPort, err) } - s := grpc.NewServer() - pb.RegisterGreeterServer(s, newServer(hostname)) - - reflection.Register(s) + grpcServer := grpc.NewServer() healthServer := health.NewServer() healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) - healthpb.RegisterHealthServer(s, healthServer) + healthgrpc.RegisterHealthServer(grpcServer, healthServer) - log.Printf("serving on %s, hostname %s", lis.Addr(), hostname) - s.Serve(lis) + log.Printf("Serving GreeterService on %s and HealthService on %s", greeterLis.Addr().String(), healthLis.Addr().String()) + go func() { + greeterServer.Serve(greeterLis) + }() + grpcServer.Serve(healthLis) } diff --git a/examples/go.mod b/examples/go.mod index 18c67afed969..0bd97db78757 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -1,13 +1,31 @@ module google.golang.org/grpc/examples -go 1.11 +go 1.17 require ( - github.com/golang/protobuf v1.4.2 - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 - google.golang.org/grpc v1.31.0 - google.golang.org/protobuf v1.25.0 + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 + github.com/golang/protobuf v1.5.3 + golang.org/x/oauth2 v0.7.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 + google.golang.org/grpc v1.54.0 + google.golang.org/protobuf v1.30.0 +) + +require ( + cloud.google.com/go/compute v1.19.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cncf/udpa/go 
v0.0.0-20220112060539-c52dc94e7fbe // indirect + github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect ) replace google.golang.org/grpc => ../ diff --git a/examples/go.sum b/examples/go.sum index f55a3e5c8968..6511b1b756ac 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -1,88 +1,1466 @@ -cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= 
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod 
h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= 
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= 
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod 
h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= 
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod 
h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= 
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= 
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod 
h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= 
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod 
h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod 
h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= 
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod 
h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= 
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= 
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod 
h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig 
v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod 
h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.5.0/go.mod 
h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= 
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod 
h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod 
h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod 
h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= 
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod 
h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod 
h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= 
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= 
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod 
h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod 
h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod 
h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= 
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod 
h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= 
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint 
v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 
v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools 
v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod 
h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod 
h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto 
v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto 
v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= 
+google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod 
h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto 
v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod 
h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc 
v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/examples/helloworld/README.md b/examples/helloworld/README.md new file mode 100644 index 000000000000..bb2138f26ffb --- /dev/null +++ b/examples/helloworld/README.md @@ -0,0 +1,29 @@ +# gRPC Hello World + +Follow these setup to run the [quick start][] example: + + 1. Get the code: + + ```console + $ go get google.golang.org/grpc/examples/helloworld/greeter_client + $ go get google.golang.org/grpc/examples/helloworld/greeter_server + ``` + + 2. 
Run the server: + + ```console + $ $(go env GOPATH)/bin/greeter_server & + ``` + + 3. Run the client: + + ```console + $ $(go env GOPATH)/bin/greeter_client + Greeting: Hello world + ``` + +For more details (including instructions for making a small change to the +example code) or if you're having trouble running this example, see [Quick +Start][]. + +[quick start]: https://grpc.io/docs/languages/go/quickstart diff --git a/examples/helloworld/greeter_client/main.go b/examples/helloworld/greeter_client/main.go index 0ca4cbaa344c..452906937dde 100644 --- a/examples/helloworld/greeter_client/main.go +++ b/examples/helloworld/greeter_client/main.go @@ -21,22 +21,28 @@ package main import ( "context" + "flag" "log" - "os" "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" pb "google.golang.org/grpc/examples/helloworld/helloworld" ) const ( - address = "localhost:50051" defaultName = "world" ) +var ( + addr = flag.String("addr", "localhost:50051", "the address to connect to") + name = flag.String("name", defaultName, "Name to greet") +) + func main() { + flag.Parse() // Set up a connection to the server. - conn, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithBlock()) + conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { log.Fatalf("did not connect: %v", err) } @@ -44,13 +50,9 @@ func main() { c := pb.NewGreeterClient(conn) // Contact the server and print out its response. 
- name := defaultName - if len(os.Args) > 1 { - name = os.Args[1] - } ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - r, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) + r, err := c.SayHello(ctx, &pb.HelloRequest{Name: *name}) if err != nil { log.Fatalf("could not greet: %v", err) } diff --git a/examples/helloworld/greeter_server/main.go b/examples/helloworld/greeter_server/main.go index 15604f9fc1f4..7a62a9b9ff25 100644 --- a/examples/helloworld/greeter_server/main.go +++ b/examples/helloworld/greeter_server/main.go @@ -21,6 +21,8 @@ package main import ( "context" + "flag" + "fmt" "log" "net" @@ -28,8 +30,8 @@ import ( pb "google.golang.org/grpc/examples/helloworld/helloworld" ) -const ( - port = ":50051" +var ( + port = flag.Int("port", 50051, "The server port") ) // server is used to implement helloworld.GreeterServer. @@ -44,12 +46,14 @@ func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloRe } func main() { - lis, err := net.Listen("tcp", port) + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) if err != nil { log.Fatalf("failed to listen: %v", err) } s := grpc.NewServer() pb.RegisterGreeterServer(s, &server{}) + log.Printf("server listening at %v", lis.Addr()) if err := s.Serve(lis); err != nil { log.Fatalf("failed to serve: %v", err) } diff --git a/examples/helloworld/helloworld/helloworld.pb.go b/examples/helloworld/helloworld/helloworld.pb.go index 2d5cbf5d7805..3fe5c1f2863d 100644 --- a/examples/helloworld/helloworld/helloworld.pb.go +++ b/examples/helloworld/helloworld/helloworld.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: examples/helloworld/helloworld/helloworld.proto package helloworld import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The request message containing the user's name. type HelloRequest struct { state protoimpl.MessageState diff --git a/examples/helloworld/helloworld/helloworld_grpc.pb.go b/examples/helloworld/helloworld/helloworld_grpc.pb.go index 39a0301c16b2..55e4f31df3ca 100644 --- a/examples/helloworld/helloworld/helloworld_grpc.pb.go +++ b/examples/helloworld/helloworld/helloworld_grpc.pb.go @@ -1,4 +1,22 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: examples/helloworld/helloworld/helloworld.proto package helloworld @@ -14,6 +32,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + Greeter_SayHello_FullMethodName = "/helloworld.Greeter/SayHello" +) + // GreeterClient is the client API for Greeter service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -32,7 +54,7 @@ func NewGreeterClient(cc grpc.ClientConnInterface) GreeterClient { func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { out := new(HelloReply) - err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...) + err := c.cc.Invoke(ctx, Greeter_SayHello_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -78,7 +100,7 @@ func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/helloworld.Greeter/SayHello", + FullMethod: Greeter_SayHello_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) diff --git a/examples/route_guide/README.md b/examples/route_guide/README.md index ddec3a0bb5b8..29343b1c6b59 100644 --- a/examples/route_guide/README.md +++ b/examples/route_guide/README.md @@ -4,11 +4,11 @@ perform unary, client streaming, server streaming and full duplex RPCs. Please refer to [gRPC Basics: Go](https://grpc.io/docs/tutorials/basic/go.html) for more information. -See the definition of the route guide service in routeguide/route_guide.proto. +See the definition of the route guide service in `routeguide/route_guide.proto`. 
# Run the sample code -To compile and run the server, assuming you are in the root of the route_guide -folder, i.e., .../examples/route_guide/, simply: +To compile and run the server, assuming you are in the root of the `route_guide` +folder, i.e., `.../examples/route_guide/`, simply: ```sh $ go run server/server.go diff --git a/examples/route_guide/client/client.go b/examples/route_guide/client/client.go index 172f10fb308b..d027d2d6d42b 100644 --- a/examples/route_guide/client/client.go +++ b/examples/route_guide/client/client.go @@ -32,6 +32,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/examples/data" pb "google.golang.org/grpc/examples/route_guide/routeguide" ) @@ -39,8 +40,8 @@ import ( var ( tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") caFile = flag.String("ca_file", "", "The file containing the CA root cert file") - serverAddr = flag.String("server_addr", "localhost:10000", "The server address in the format of host:port") - serverHostOverride = flag.String("server_host_override", "x.test.youtube.com", "The server name used to verify the hostname returned by the TLS handshake") + serverAddr = flag.String("addr", "localhost:50051", "The server address in the format of host:port") + serverHostOverride = flag.String("server_host_override", "x.test.example.com", "The server name used to verify the hostname returned by the TLS handshake") ) // printFeature gets the feature for the given point. 
@@ -50,7 +51,7 @@ func printFeature(client pb.RouteGuideClient, point *pb.Point) { defer cancel() feature, err := client.GetFeature(ctx, point) if err != nil { - log.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) + log.Fatalf("client.GetFeature failed: %v", err) } log.Println(feature) } @@ -62,7 +63,7 @@ func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { defer cancel() stream, err := client.ListFeatures(ctx, rect) if err != nil { - log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + log.Fatalf("client.ListFeatures failed: %v", err) } for { feature, err := stream.Recv() @@ -70,7 +71,7 @@ func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { break } if err != nil { - log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) + log.Fatalf("client.ListFeatures failed: %v", err) } log.Printf("Feature: name: %q, point:(%v, %v)", feature.GetName(), feature.GetLocation().GetLatitude(), feature.GetLocation().GetLongitude()) @@ -91,16 +92,16 @@ func runRecordRoute(client pb.RouteGuideClient) { defer cancel() stream, err := client.RecordRoute(ctx) if err != nil { - log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) + log.Fatalf("client.RecordRoute failed: %v", err) } for _, point := range points { if err := stream.Send(point); err != nil { - log.Fatalf("%v.Send(%v) = %v", stream, point, err) + log.Fatalf("client.RecordRoute: stream.Send(%v) failed: %v", point, err) } } reply, err := stream.CloseAndRecv() if err != nil { - log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) + log.Fatalf("client.RecordRoute failed: %v", err) } log.Printf("Route summary: %v", reply) } @@ -119,7 +120,7 @@ func runRouteChat(client pb.RouteGuideClient) { defer cancel() stream, err := client.RouteChat(ctx) if err != nil { - log.Fatalf("%v.RouteChat(_) = _, %v", client, err) + log.Fatalf("client.RouteChat failed: %v", err) } waitc := make(chan struct{}) go func() { @@ -131,14 +132,14 @@ func runRouteChat(client pb.RouteGuideClient) { return } 
if err != nil { - log.Fatalf("Failed to receive a note : %v", err) + log.Fatalf("client.RouteChat failed: %v", err) } log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) } }() for _, note := range notes { if err := stream.Send(note); err != nil { - log.Fatalf("Failed to send a note: %v", err) + log.Fatalf("client.RouteChat: stream.Send(%v) failed: %v", note, err) } } stream.CloseSend() @@ -160,14 +161,13 @@ func main() { } creds, err := credentials.NewClientTLSFromFile(*caFile, *serverHostOverride) if err != nil { - log.Fatalf("Failed to create TLS credentials %v", err) + log.Fatalf("Failed to create TLS credentials: %v", err) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } - opts = append(opts, grpc.WithBlock()) conn, err := grpc.Dial(*serverAddr, opts...) if err != nil { log.Fatalf("fail to dial: %v", err) diff --git a/examples/route_guide/routeguide/route_guide.pb.go b/examples/route_guide/routeguide/route_guide.pb.go index 85c3033c7cd2..482ce5ff6bf1 100644 --- a/examples/route_guide/routeguide/route_guide.pb.go +++ b/examples/route_guide/routeguide/route_guide.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: examples/route_guide/routeguide/route_guide.proto package routeguide import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - // Points are represented as latitude-longitude pairs in the E7 representation // (degrees multiplied by 10**7 and rounded to the nearest integer). // Latitudes should be in the range +/- 90 degrees and longitude should be in diff --git a/examples/route_guide/routeguide/route_guide_grpc.pb.go b/examples/route_guide/routeguide/route_guide_grpc.pb.go index 66860e63c476..08012c0f4bcf 100644 --- a/examples/route_guide/routeguide/route_guide_grpc.pb.go +++ b/examples/route_guide/routeguide/route_guide_grpc.pb.go @@ -1,4 +1,22 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: examples/route_guide/routeguide/route_guide.proto package routeguide @@ -14,6 +32,13 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + RouteGuide_GetFeature_FullMethodName = "/routeguide.RouteGuide/GetFeature" + RouteGuide_ListFeatures_FullMethodName = "/routeguide.RouteGuide/ListFeatures" + RouteGuide_RecordRoute_FullMethodName = "/routeguide.RouteGuide/RecordRoute" + RouteGuide_RouteChat_FullMethodName = "/routeguide.RouteGuide/RouteChat" +) + // RouteGuideClient is the client API for RouteGuide service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -54,7 +79,7 @@ func NewRouteGuideClient(cc grpc.ClientConnInterface) RouteGuideClient { func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) { out := new(Feature) - err := c.cc.Invoke(ctx, "/routeguide.RouteGuide/GetFeature", in, out, opts...) + err := c.cc.Invoke(ctx, RouteGuide_GetFeature_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -62,7 +87,7 @@ func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...gr } func (c *routeGuideClient) ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) { - stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[0], "/routeguide.RouteGuide/ListFeatures", opts...) + stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[0], RouteGuide_ListFeatures_FullMethodName, opts...) if err != nil { return nil, err } @@ -94,7 +119,7 @@ func (x *routeGuideListFeaturesClient) Recv() (*Feature, error) { } func (c *routeGuideClient) RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) { - stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[1], "/routeguide.RouteGuide/RecordRoute", opts...) + stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[1], RouteGuide_RecordRoute_FullMethodName, opts...) if err != nil { return nil, err } @@ -128,7 +153,7 @@ func (x *routeGuideRecordRouteClient) CloseAndRecv() (*RouteSummary, error) { } func (c *routeGuideClient) RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) { - stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[2], "/routeguide.RouteGuide/RouteChat", opts...) 
+ stream, err := c.cc.NewStream(ctx, &RouteGuide_ServiceDesc.Streams[2], RouteGuide_RouteChat_FullMethodName, opts...) if err != nil { return nil, err } @@ -228,7 +253,7 @@ func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/routeguide.RouteGuide/GetFeature", + FullMethod: RouteGuide_GetFeature_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RouteGuideServer).GetFeature(ctx, req.(*Point)) diff --git a/examples/route_guide/server/server.go b/examples/route_guide/server/server.go index dd804406afcd..44b2f963516b 100644 --- a/examples/route_guide/server/server.go +++ b/examples/route_guide/server/server.go @@ -28,10 +28,10 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" "math" "net" + "os" "sync" "time" @@ -50,7 +50,7 @@ var ( certFile = flag.String("cert_file", "", "The TLS cert file") keyFile = flag.String("key_file", "", "The TLS key file") jsonDBFile = flag.String("json_db_file", "", "A json file containing a list of features") - port = flag.Int("port", 10000, "The server port") + port = flag.Int("port", 50051, "The server port") ) type routeGuideServer struct { @@ -155,7 +155,7 @@ func (s *routeGuideServer) loadFeatures(filePath string) { var data []byte if filePath != "" { var err error - data, err = ioutil.ReadFile(filePath) + data, err = os.ReadFile(filePath) if err != nil { log.Fatalf("Failed to load default features: %v", err) } @@ -233,7 +233,7 @@ func main() { } creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { - log.Fatalf("Failed to generate credentials %v", err) + log.Fatalf("Failed to generate credentials: %v", err) } opts = []grpc.ServerOption{grpc.Creds(creds)} } diff --git a/gcp/observability/config.go b/gcp/observability/config.go new file mode 100644 index 000000000000..ae7ea8b6983c --- /dev/null +++ b/gcp/observability/config.go @@ -0,0 +1,273 @@ +/* + * + * 
Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package observability + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + gcplogging "cloud.google.com/go/logging" + "golang.org/x/oauth2/google" + "google.golang.org/grpc/internal/envconfig" +) + +const envProjectID = "GOOGLE_CLOUD_PROJECT" + +// fetchDefaultProjectID fetches the default GCP project id from environment. +func fetchDefaultProjectID(ctx context.Context) string { + // Step 1: Check ENV var + if s := os.Getenv(envProjectID); s != "" { + logger.Infof("Found project ID from env %v: %v", envProjectID, s) + return s + } + // Step 2: Check default credential + credentials, err := google.FindDefaultCredentials(ctx, gcplogging.WriteScope) + if err != nil { + logger.Infof("Failed to locate Google Default Credential: %v", err) + return "" + } + if credentials.ProjectID == "" { + logger.Infof("Failed to find project ID in default credential: %v", err) + return "" + } + logger.Infof("Found project ID from Google Default Credential: %v", credentials.ProjectID) + return credentials.ProjectID +} + +// validateMethodString validates whether the string passed in is a valid +// pattern. 
+func validateMethodString(method string) error { + if strings.HasPrefix(method, "/") { + return errors.New("cannot have a leading slash") + } + serviceMethod := strings.Split(method, "/") + if len(serviceMethod) != 2 { + return errors.New("/ must come in between service and method, only one /") + } + if serviceMethod[1] == "" { + return errors.New("method name must be non empty") + } + if serviceMethod[0] == "*" { + return errors.New("cannot have service wildcard * i.e. (*/m)") + } + return nil +} + +func validateLogEventMethod(methods []string, exclude bool) error { + for _, method := range methods { + if method == "*" { + if exclude { + return errors.New("cannot have exclude and a '*' wildcard") + } + continue + } + if err := validateMethodString(method); err != nil { + return fmt.Errorf("invalid method string: %v, err: %v", method, err) + } + } + return nil +} + +func validateLoggingEvents(config *config) error { + if config.CloudLogging == nil { + return nil + } + for _, clientRPCEvent := range config.CloudLogging.ClientRPCEvents { + if err := validateLogEventMethod(clientRPCEvent.Methods, clientRPCEvent.Exclude); err != nil { + return fmt.Errorf("error in clientRPCEvent method: %v", err) + } + } + for _, serverRPCEvent := range config.CloudLogging.ServerRPCEvents { + if err := validateLogEventMethod(serverRPCEvent.Methods, serverRPCEvent.Exclude); err != nil { + return fmt.Errorf("error in serverRPCEvent method: %v", err) + } + } + return nil +} + +// unmarshalAndVerifyConfig unmarshals a json string representing an +// observability config into its internal go format, and also verifies the +// configuration's fields for validity. 
+func unmarshalAndVerifyConfig(rawJSON json.RawMessage) (*config, error) { + var config config + if err := json.Unmarshal(rawJSON, &config); err != nil { + return nil, fmt.Errorf("error parsing observability config: %v", err) + } + if err := validateLoggingEvents(&config); err != nil { + return nil, fmt.Errorf("error parsing observability config: %v", err) + } + if config.CloudTrace != nil && (config.CloudTrace.SamplingRate > 1 || config.CloudTrace.SamplingRate < 0) { + return nil, fmt.Errorf("error parsing observability config: invalid cloud trace sampling rate %v", config.CloudTrace.SamplingRate) + } + logger.Infof("Parsed ObservabilityConfig: %+v", &config) + return &config, nil +} + +func parseObservabilityConfig() (*config, error) { + if f := envconfig.ObservabilityConfigFile; f != "" { + if envconfig.ObservabilityConfig != "" { + logger.Warning("Ignoring GRPC_GCP_OBSERVABILITY_CONFIG and using GRPC_GCP_OBSERVABILITY_CONFIG_FILE contents.") + } + content, err := os.ReadFile(f) + if err != nil { + return nil, fmt.Errorf("error reading observability configuration file %q: %v", f, err) + } + return unmarshalAndVerifyConfig(content) + } else if envconfig.ObservabilityConfig != "" { + return unmarshalAndVerifyConfig([]byte(envconfig.ObservabilityConfig)) + } + // If the ENV var doesn't exist, do nothing + return nil, nil +} + +func ensureProjectIDInObservabilityConfig(ctx context.Context, config *config) error { + if config.ProjectID == "" { + // Try to fetch the GCP project id + projectID := fetchDefaultProjectID(ctx) + if projectID == "" { + return fmt.Errorf("empty destination project ID") + } + config.ProjectID = projectID + } + return nil +} + +type clientRPCEvents struct { + // Methods is a list of strings which can select a group of methods. By + // default, the list is empty, matching no methods. + // + // The value of the method is in the form of /. + // + // "*" is accepted as a wildcard for: + // 1. The method name. 
If the value is /*, it matches all + // methods in the specified service. + // 2. The whole value of the field which matches any /. + // It’s not supported when Exclude is true. + // 3. The * wildcard cannot be used on the service name independently, + // */ is not supported. + // + // The service name, when specified, must be the fully qualified service + // name, including the package name. + // + // Examples: + // 1."goo.Foo/Bar" selects only the method "Bar" from service "goo.Foo", + // here “goo” is the package name. + // 2."goo.Foo/*" selects all methods from service "goo.Foo" + // 3. "*" selects all methods from all services. + Methods []string `json:"methods,omitempty"` + // Exclude represents whether the methods denoted by Methods should be + // excluded from logging. The default value is false, meaning the methods + // denoted by Methods are included in the logging. If Exclude is true, the + // wildcard `*` cannot be used as value of an entry in Methods. + Exclude bool `json:"exclude,omitempty"` + // MaxMetadataBytes is the maximum number of bytes of each header to log. If + // the size of the metadata is greater than the defined limit, content past + // the limit will be truncated. The default value is 0. + MaxMetadataBytes int `json:"max_metadata_bytes"` + // MaxMessageBytes is the maximum number of bytes of each message to log. If + // the size of the message is greater than the defined limit, content past + // the limit will be truncated. The default value is 0. + MaxMessageBytes int `json:"max_message_bytes"` +} + +type serverRPCEvents struct { + // Methods is a list of strings which can select a group of methods. By + // default, the list is empty, matching no methods. + // + // The value of the method is in the form of /. + // + // "*" is accepted as a wildcard for: + // 1. The method name. If the value is /*, it matches all + // methods in the specified service. + // 2. The whole value of the field which matches any /. 
+ // It’s not supported when Exclude is true. + // 3. The * wildcard cannot be used on the service name independently, + // */ is not supported. + // + // The service name, when specified, must be the fully qualified service + // name, including the package name. + // + // Examples: + // 1."goo.Foo/Bar" selects only the method "Bar" from service "goo.Foo", + // here “goo” is the package name. + // 2."goo.Foo/*" selects all methods from service "goo.Foo" + // 3. "*" selects all methods from all services. + Methods []string `json:"methods,omitempty"` + // Exclude represents whether the methods denoted by Methods should be + // excluded from logging. The default value is false, meaning the methods + // denoted by Methods are included in the logging. If Exclude is true, the + // wildcard `*` cannot be used as value of an entry in Methods. + Exclude bool `json:"exclude,omitempty"` + // MaxMetadataBytes is the maximum number of bytes of each header to log. If + // the size of the metadata is greater than the defined limit, content past + // the limit will be truncated. The default value is 0. + MaxMetadataBytes int `json:"max_metadata_bytes"` + // MaxMessageBytes is the maximum number of bytes of each message to log. If + // the size of the message is greater than the defined limit, content past + // the limit will be truncated. The default value is 0. + MaxMessageBytes int `json:"max_message_bytes"` +} + +type cloudLogging struct { + // ClientRPCEvents represents the configuration for outgoing RPC's from the + // binary. The client_rpc_events configs are evaluated in text order, the + // first one matched is used. If an RPC doesn't match an entry, it will + // continue on to the next entry in the list. + ClientRPCEvents []clientRPCEvents `json:"client_rpc_events,omitempty"` + + // ServerRPCEvents represents the configuration for incoming RPC's to the + // binary. The server_rpc_events configs are evaluated in text order, the + // first one matched is used. 
If an RPC doesn't match an entry, it will + // continue on to the next entry in the list. + ServerRPCEvents []serverRPCEvents `json:"server_rpc_events,omitempty"` +} + +type cloudMonitoring struct{} + +type cloudTrace struct { + // SamplingRate is the global setting that controls the probability of a RPC + // being traced. For example, 0.05 means there is a 5% chance for a RPC to + // be traced, 1.0 means trace every call, 0 means don’t start new traces. By + // default, the sampling_rate is 0. + SamplingRate float64 `json:"sampling_rate,omitempty"` +} + +type config struct { + // ProjectID is the destination GCP project identifier for uploading log + // entries. If empty, the gRPC Observability plugin will attempt to fetch + // the project_id from the GCP environment variables, or from the default + // credentials. If not found, the observability init functions will return + // an error. + ProjectID string `json:"project_id,omitempty"` + // CloudLogging defines the logging options. If not present, logging is disabled. + CloudLogging *cloudLogging `json:"cloud_logging,omitempty"` + // CloudMonitoring determines whether or not metrics are enabled based on + // whether it is present or not. If present, monitoring will be enabled, if + // not present, monitoring is disabled. + CloudMonitoring *cloudMonitoring `json:"cloud_monitoring,omitempty"` + // CloudTrace defines the tracing options. When present, tracing is enabled + // with default configurations. When absent, the tracing is disabled. + CloudTrace *cloudTrace `json:"cloud_trace,omitempty"` + // Labels are applied to cloud logging, monitoring, and trace. + Labels map[string]string `json:"labels,omitempty"` +} diff --git a/gcp/observability/exporting.go b/gcp/observability/exporting.go new file mode 100644 index 000000000000..3c27b3533e04 --- /dev/null +++ b/gcp/observability/exporting.go @@ -0,0 +1,108 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package observability + +import ( + "context" + "fmt" + "time" + + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats/opencensus" + + gcplogging "cloud.google.com/go/logging" +) + +// cOptsDisableLogTrace are client options for the go client libraries which are +// used to configure connections to GCP exporting backends. These disable global +// dial and server options set by this module, which configure logging, metrics, +// and tracing on all created grpc.ClientConn's and grpc.Server's. These options +// turn on only metrics, and also disable the client libraries behavior of +// plumbing in the older opencensus instrumentation code. +var cOptsDisableLogTrace = []option.ClientOption{ + option.WithTelemetryDisabled(), + option.WithGRPCDialOption(internal.DisableGlobalDialOptions.(func() grpc.DialOption)()), + option.WithGRPCDialOption(opencensus.DialOption(opencensus.TraceOptions{ + DisableTrace: true, + })), +} + +// loggingExporter is the interface of logging exporter for gRPC Observability. +// In future, we might expose this to allow users provide custom exporters. But +// now, it exists for testing purposes. +type loggingExporter interface { + // EmitGrpcLogRecord writes a gRPC LogRecord to cache without blocking. + EmitGcpLoggingEntry(entry gcplogging.Entry) + // Close flushes all pending data and closes the exporter. 
+ Close() error +} + +type cloudLoggingExporter struct { + projectID string + client *gcplogging.Client + logger *gcplogging.Logger +} + +func newCloudLoggingExporter(ctx context.Context, config *config) (loggingExporter, error) { + c, err := gcplogging.NewClient(ctx, fmt.Sprintf("projects/%v", config.ProjectID), cOptsDisableLogTrace...) + if err != nil { + return nil, fmt.Errorf("failed to create cloudLoggingExporter: %v", err) + } + defer logger.Infof("Successfully created cloudLoggingExporter") + if len(config.Labels) != 0 { + logger.Infof("Adding labels: %+v", config.Labels) + } + return &cloudLoggingExporter{ + projectID: config.ProjectID, + client: c, + logger: c.Logger("microservices.googleapis.com/observability/grpc", gcplogging.CommonLabels(config.Labels), gcplogging.BufferedByteLimit(1024*1024*50), gcplogging.DelayThreshold(time.Second*10)), + }, nil +} + +func (cle *cloudLoggingExporter) EmitGcpLoggingEntry(entry gcplogging.Entry) { + cle.logger.Log(entry) + if logger.V(2) { + logger.Infof("Uploading event to CloudLogging: %+v", entry) + } +} + +func (cle *cloudLoggingExporter) Close() error { + var errFlush, errClose error + if cle.logger != nil { + errFlush = cle.logger.Flush() + } + if cle.client != nil { + errClose = cle.client.Close() + } + if errFlush != nil && errClose != nil { + return fmt.Errorf("failed to close exporter. 
Flush failed: %v; Close failed: %v", errFlush, errClose) + } + if errFlush != nil { + return errFlush + } + if errClose != nil { + return errClose + } + cle.logger = nil + cle.client = nil + logger.Infof("Closed CloudLogging exporter") + return nil +} diff --git a/gcp/observability/go.mod b/gcp/observability/go.mod new file mode 100644 index 000000000000..6f186e92ef59 --- /dev/null +++ b/gcp/observability/go.mod @@ -0,0 +1,43 @@ +module google.golang.org/grpc/gcp/observability + +go 1.17 + +require ( + cloud.google.com/go/logging v1.7.0 + contrib.go.opencensus.io/exporter/stackdriver v0.13.12 + github.com/google/go-cmp v0.5.9 + github.com/google/uuid v1.3.0 + go.opencensus.io v0.24.0 + golang.org/x/oauth2 v0.7.0 + google.golang.org/api v0.114.0 + google.golang.org/grpc v1.54.0 + google.golang.org/grpc/stats/opencensus v1.0.0 +) + +require ( + cloud.google.com/go v0.110.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/longrunning v0.4.1 // indirect + cloud.google.com/go/monitoring v1.13.0 // indirect + cloud.google.com/go/trace v1.9.0 // indirect + github.com/aws/aws-sdk-go v1.44.162 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/prometheus/prometheus v2.5.0+incompatible // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + 
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/protobuf v1.30.0 // indirect +) + +replace google.golang.org/grpc => ../.. diff --git a/gcp/observability/go.sum b/gcp/observability/go.sum new file mode 100644 index 000000000000..b06bf212fd0b --- /dev/null +++ b/gcp/observability/go.sum @@ -0,0 +1,1503 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod 
h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod 
h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= 
+cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= 
+cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= 
+cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery 
v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= 
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod 
h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis 
v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion 
v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod 
h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= 
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= 
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod 
h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod 
h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= 
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= 
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0 h1:2qsrgXGVoRXpP7otZ14eE1I568zAa92sJSDPyOJvwjM= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod 
h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig 
v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod 
h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.5.0/go.mod 
h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= 
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod 
h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod 
h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod 
h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= 
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0 h1:olxC0QHC59zgJVALtgqfD9tGk0lfeCP5/AGXL3Px/no= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= 
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod 
h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= +github.com/aws/aws-sdk-go v1.44.162/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= 
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf 
v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod 
h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod 
h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod 
h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= +github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx 
v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp 
v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net 
v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod 
h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= 
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api 
v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod 
h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod 
h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto 
v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= 
+google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod 
h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto 
v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= 
+google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/stats/opencensus v1.0.0 h1:evSYcRZaSToQp+borzWE52+03joezZeXcKJvZDfkUJA= +google.golang.org/grpc/stats/opencensus v1.0.0/go.mod h1:FhdkeYvN43wLYUnapVuRJJ9JXkNwe403iLUW2LKSnjs= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod 
h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod 
h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/gcp/observability/logging.go b/gcp/observability/logging.go new file mode 100644 index 000000000000..0ffbd93b3922 --- /dev/null +++ b/gcp/observability/logging.go @@ -0,0 +1,505 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package observability + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "strings" + "time" + + gcplogging "cloud.google.com/go/logging" + "github.com/google/uuid" + "go.opencensus.io/trace" + + "google.golang.org/grpc" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + iblog "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/stats/opencensus" +) + +var lExporter loggingExporter + +var newLoggingExporter = newCloudLoggingExporter + +var canonicalString = internal.CanonicalString.(func(codes.Code) string) + +// translateMetadata translates the metadata from Binary Logging format to +// its GrpcLogEntry equivalent. 
+func translateMetadata(m *binlogpb.Metadata) map[string]string { + metadata := make(map[string]string) + for _, entry := range m.GetEntry() { + entryKey := entry.GetKey() + var newVal string + if strings.HasSuffix(entryKey, "-bin") { // bin header + newVal = base64.StdEncoding.EncodeToString(entry.GetValue()) + } else { // normal header + newVal = string(entry.GetValue()) + } + var oldVal string + var ok bool + if oldVal, ok = metadata[entryKey]; !ok { + metadata[entryKey] = newVal + continue + } + metadata[entryKey] = oldVal + "," + newVal + } + return metadata +} + +func setPeerIfPresent(binlogEntry *binlogpb.GrpcLogEntry, grpcLogEntry *grpcLogEntry) { + if binlogEntry.GetPeer() != nil { + grpcLogEntry.Peer.Type = addrType(binlogEntry.GetPeer().GetType()) + grpcLogEntry.Peer.Address = binlogEntry.GetPeer().GetAddress() + grpcLogEntry.Peer.IPPort = binlogEntry.GetPeer().GetIpPort() + } +} + +var loggerTypeToEventLogger = map[binlogpb.GrpcLogEntry_Logger]loggerType{ + binlogpb.GrpcLogEntry_LOGGER_UNKNOWN: loggerUnknown, + binlogpb.GrpcLogEntry_LOGGER_CLIENT: loggerClient, + binlogpb.GrpcLogEntry_LOGGER_SERVER: loggerServer, +} + +type eventType int + +const ( + // eventTypeUnknown is an unknown event type. + eventTypeUnknown eventType = iota + // eventTypeClientHeader is a header sent from client to server. + eventTypeClientHeader + // eventTypeServerHeader is a header sent from server to client. + eventTypeServerHeader + // eventTypeClientMessage is a message sent from client to server. + eventTypeClientMessage + // eventTypeServerMessage is a message sent from server to client. + eventTypeServerMessage + // eventTypeClientHalfClose is a signal that the loggerClient is done sending. + eventTypeClientHalfClose + // eventTypeServerTrailer indicated the end of a gRPC call. + eventTypeServerTrailer + // eventTypeCancel is a signal that the rpc is canceled. 
+ eventTypeCancel +) + +func (t eventType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch t { + case eventTypeUnknown: + buffer.WriteString("EVENT_TYPE_UNKNOWN") + case eventTypeClientHeader: + buffer.WriteString("CLIENT_HEADER") + case eventTypeServerHeader: + buffer.WriteString("SERVER_HEADER") + case eventTypeClientMessage: + buffer.WriteString("CLIENT_MESSAGE") + case eventTypeServerMessage: + buffer.WriteString("SERVER_MESSAGE") + case eventTypeClientHalfClose: + buffer.WriteString("CLIENT_HALF_CLOSE") + case eventTypeServerTrailer: + buffer.WriteString("SERVER_TRAILER") + case eventTypeCancel: + buffer.WriteString("CANCEL") + } + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +type loggerType int + +const ( + loggerUnknown loggerType = iota + loggerClient + loggerServer +) + +func (t loggerType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch t { + case loggerUnknown: + buffer.WriteString("LOGGER_UNKNOWN") + case loggerClient: + buffer.WriteString("CLIENT") + case loggerServer: + buffer.WriteString("SERVER") + } + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +type payload struct { + Metadata map[string]string `json:"metadata,omitempty"` + // Timeout is the RPC timeout value. + Timeout time.Duration `json:"timeout,omitempty"` + // StatusCode is the gRPC status code. + StatusCode string `json:"statusCode,omitempty"` + // StatusMessage is the gRPC status message. + StatusMessage string `json:"statusMessage,omitempty"` + // StatusDetails is the value of the grpc-status-details-bin metadata key, + // if any. This is always an encoded google.rpc.Status message. + StatusDetails []byte `json:"statusDetails,omitempty"` + // MessageLength is the length of the message. + MessageLength uint32 `json:"messageLength,omitempty"` + // Message is the message of this entry. This is populated in the case of a + // message event. 
+ Message []byte `json:"message,omitempty"` +} + +type addrType int + +const ( + typeUnknown addrType = iota // `json:"TYPE_UNKNOWN"` + ipv4 // `json:"IPV4"` + ipv6 // `json:"IPV6"` + unix // `json:"UNIX"` +) + +func (at addrType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch at { + case typeUnknown: + buffer.WriteString("TYPE_UNKNOWN") + case ipv4: + buffer.WriteString("IPV4") + case ipv6: + buffer.WriteString("IPV6") + case unix: + buffer.WriteString("UNIX") + } + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +type address struct { + // Type is the address type of the address of the peer of the RPC. + Type addrType `json:"type,omitempty"` + // Address is the address of the peer of the RPC. + Address string `json:"address,omitempty"` + // IPPort is the ip and port in string form. It is used only for addrType + // typeIPv4 and typeIPv6. + IPPort uint32 `json:"ipPort,omitempty"` +} + +type grpcLogEntry struct { + // CallID is a uuid which uniquely identifies a call. Each call may have + // several log entries. They will all have the same CallID. Nothing is + // guaranteed about their value other than they are unique across different + // RPCs in the same gRPC process. + CallID string `json:"callId,omitempty"` + // SequenceID is the entry sequence ID for this call. The first message has + // a value of 1, to disambiguate from an unset value. The purpose of this + // field is to detect missing entries in environments where durability or + // ordering is not guaranteed. + SequenceID uint64 `json:"sequenceId,omitempty"` + // Type is the type of binary logging event being logged. + Type eventType `json:"type,omitempty"` + // Logger is the entity that generates the log entry. + Logger loggerType `json:"logger,omitempty"` + // Payload is the payload of this log entry. 
+ Payload payload `json:"payload,omitempty"` + // PayloadTruncated is whether the message or metadata field is either + // truncated or emitted due to options specified in the configuration. + PayloadTruncated bool `json:"payloadTruncated,omitempty"` + // Peer is information about the Peer of the RPC. + Peer address `json:"peer,omitempty"` + // A single process may be used to run multiple virtual servers with + // different identities. + // Authority is the name of such a server identify. It is typically a + // portion of the URI in the form of or :. + Authority string `json:"authority,omitempty"` + // ServiceName is the name of the service. + ServiceName string `json:"serviceName,omitempty"` + // MethodName is the name of the RPC method. + MethodName string `json:"methodName,omitempty"` +} + +type methodLoggerBuilder interface { + Build(iblog.LogEntryConfig) *binlogpb.GrpcLogEntry +} + +type binaryMethodLogger struct { + callID, serviceName, methodName, authority, projectID string + + mlb methodLoggerBuilder + exporter loggingExporter + clientSide bool +} + +// buildGCPLoggingEntry converts the binary log log entry into a gcp logging +// entry. 
+func (bml *binaryMethodLogger) buildGCPLoggingEntry(ctx context.Context, c iblog.LogEntryConfig) gcplogging.Entry { + binLogEntry := bml.mlb.Build(c) + + grpcLogEntry := &grpcLogEntry{ + CallID: bml.callID, + SequenceID: binLogEntry.GetSequenceIdWithinCall(), + Logger: loggerTypeToEventLogger[binLogEntry.Logger], + } + + switch binLogEntry.GetType() { + case binlogpb.GrpcLogEntry_EVENT_TYPE_UNKNOWN: + grpcLogEntry.Type = eventTypeUnknown + case binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER: + grpcLogEntry.Type = eventTypeClientHeader + if binLogEntry.GetClientHeader() != nil { + methodName := binLogEntry.GetClientHeader().MethodName + // Example method name: /grpc.testing.TestService/UnaryCall + if strings.Contains(methodName, "/") { + tokens := strings.Split(methodName, "/") + if len(tokens) == 3 { + // Record service name and method name for all events. + bml.serviceName = tokens[1] + bml.methodName = tokens[2] + } else { + logger.Infof("Malformed method name: %v", methodName) + } + } + bml.authority = binLogEntry.GetClientHeader().GetAuthority() + grpcLogEntry.Payload.Timeout = binLogEntry.GetClientHeader().GetTimeout().AsDuration() + grpcLogEntry.Payload.Metadata = translateMetadata(binLogEntry.GetClientHeader().GetMetadata()) + } + grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() + setPeerIfPresent(binLogEntry, grpcLogEntry) + case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER: + grpcLogEntry.Type = eventTypeServerHeader + if binLogEntry.GetServerHeader() != nil { + grpcLogEntry.Payload.Metadata = translateMetadata(binLogEntry.GetServerHeader().GetMetadata()) + } + grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() + setPeerIfPresent(binLogEntry, grpcLogEntry) + case binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE: + grpcLogEntry.Type = eventTypeClientMessage + grpcLogEntry.Payload.Message = binLogEntry.GetMessage().GetData() + grpcLogEntry.Payload.MessageLength = binLogEntry.GetMessage().GetLength() + 
grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() + case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE: + grpcLogEntry.Type = eventTypeServerMessage + grpcLogEntry.Payload.Message = binLogEntry.GetMessage().GetData() + grpcLogEntry.Payload.MessageLength = binLogEntry.GetMessage().GetLength() + grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() + case binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE: + grpcLogEntry.Type = eventTypeClientHalfClose + case binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER: + grpcLogEntry.Type = eventTypeServerTrailer + grpcLogEntry.Payload.Metadata = translateMetadata(binLogEntry.GetTrailer().Metadata) + grpcLogEntry.Payload.StatusCode = canonicalString(codes.Code(binLogEntry.GetTrailer().GetStatusCode())) + grpcLogEntry.Payload.StatusMessage = binLogEntry.GetTrailer().GetStatusMessage() + grpcLogEntry.Payload.StatusDetails = binLogEntry.GetTrailer().GetStatusDetails() + grpcLogEntry.PayloadTruncated = binLogEntry.GetPayloadTruncated() + setPeerIfPresent(binLogEntry, grpcLogEntry) + case binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL: + grpcLogEntry.Type = eventTypeCancel + } + grpcLogEntry.ServiceName = bml.serviceName + grpcLogEntry.MethodName = bml.methodName + grpcLogEntry.Authority = bml.authority + + var sc trace.SpanContext + var ok bool + if bml.clientSide { + // client side span, populated through opencensus trace package. + if span := trace.FromContext(ctx); span != nil { + sc = span.SpanContext() + ok = true + } + } else { + // server side span, populated through stats/opencensus package. 
+ sc, ok = opencensus.SpanContextFromContext(ctx) + } + gcploggingEntry := gcplogging.Entry{ + Timestamp: binLogEntry.GetTimestamp().AsTime(), + Severity: 100, + Payload: grpcLogEntry, + } + if ok { + gcploggingEntry.Trace = "projects/" + bml.projectID + "/traces/" + sc.TraceID.String() + gcploggingEntry.SpanID = sc.SpanID.String() + gcploggingEntry.TraceSampled = sc.IsSampled() + } + return gcploggingEntry +} + +func (bml *binaryMethodLogger) Log(ctx context.Context, c iblog.LogEntryConfig) { + bml.exporter.EmitGcpLoggingEntry(bml.buildGCPLoggingEntry(ctx, c)) +} + +type eventConfig struct { + // ServiceMethod has /s/m syntax for fast matching. + ServiceMethod map[string]bool + Services map[string]bool + MatchAll bool + + // If true, won't log anything. + Exclude bool + HeaderBytes uint64 + MessageBytes uint64 +} + +type binaryLogger struct { + EventConfigs []eventConfig + projectID string + exporter loggingExporter + clientSide bool +} + +func (bl *binaryLogger) GetMethodLogger(methodName string) iblog.MethodLogger { + s, _, err := grpcutil.ParseMethod(methodName) + if err != nil { + logger.Infof("binarylogging: failed to parse %q: %v", methodName, err) + return nil + } + for _, eventConfig := range bl.EventConfigs { + if eventConfig.MatchAll || eventConfig.ServiceMethod[methodName] || eventConfig.Services[s] { + if eventConfig.Exclude { + return nil + } + + return &binaryMethodLogger{ + exporter: bl.exporter, + mlb: iblog.NewTruncatingMethodLogger(eventConfig.HeaderBytes, eventConfig.MessageBytes), + callID: uuid.NewString(), + projectID: bl.projectID, + clientSide: bl.clientSide, + } + } + } + return nil +} + +// parseMethod splits service and method from the input. It expects format +// "service/method". +func parseMethod(method string) (string, string, error) { + pos := strings.Index(method, "/") + if pos < 0 { + // Shouldn't happen, config already validated. 
+ return "", "", errors.New("invalid method name: no / found") + } + return method[:pos], method[pos+1:], nil +} + +func registerClientRPCEvents(config *config, exporter loggingExporter) { + clientRPCEvents := config.CloudLogging.ClientRPCEvents + if len(clientRPCEvents) == 0 { + return + } + var eventConfigs []eventConfig + for _, clientRPCEvent := range clientRPCEvents { + eventConfig := eventConfig{ + Exclude: clientRPCEvent.Exclude, + HeaderBytes: uint64(clientRPCEvent.MaxMetadataBytes), + MessageBytes: uint64(clientRPCEvent.MaxMessageBytes), + } + for _, method := range clientRPCEvent.Methods { + eventConfig.ServiceMethod = make(map[string]bool) + eventConfig.Services = make(map[string]bool) + if method == "*" { + eventConfig.MatchAll = true + continue + } + s, m, err := parseMethod(method) + if err != nil { + continue + } + if m == "*" { + eventConfig.Services[s] = true + continue + } + eventConfig.ServiceMethod["/"+method] = true + } + eventConfigs = append(eventConfigs, eventConfig) + } + clientSideLogger := &binaryLogger{ + EventConfigs: eventConfigs, + exporter: exporter, + projectID: config.ProjectID, + clientSide: true, + } + internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(internal.WithBinaryLogger.(func(bl iblog.Logger) grpc.DialOption)(clientSideLogger)) +} + +func registerServerRPCEvents(config *config, exporter loggingExporter) { + serverRPCEvents := config.CloudLogging.ServerRPCEvents + if len(serverRPCEvents) == 0 { + return + } + var eventConfigs []eventConfig + for _, serverRPCEvent := range serverRPCEvents { + eventConfig := eventConfig{ + Exclude: serverRPCEvent.Exclude, + HeaderBytes: uint64(serverRPCEvent.MaxMetadataBytes), + MessageBytes: uint64(serverRPCEvent.MaxMessageBytes), + } + for _, method := range serverRPCEvent.Methods { + eventConfig.ServiceMethod = make(map[string]bool) + eventConfig.Services = make(map[string]bool) + if method == "*" { + eventConfig.MatchAll = true + continue + } + s, m, err := parseMethod(method) 
+ if err != nil { + continue + } + if m == "*" { + eventConfig.Services[s] = true + continue + } + eventConfig.ServiceMethod["/"+method] = true + } + eventConfigs = append(eventConfigs, eventConfig) + } + serverSideLogger := &binaryLogger{ + EventConfigs: eventConfigs, + exporter: exporter, + projectID: config.ProjectID, + clientSide: false, + } + internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(internal.BinaryLogger.(func(bl iblog.Logger) grpc.ServerOption)(serverSideLogger)) +} + +func startLogging(ctx context.Context, config *config) error { + if config == nil || config.CloudLogging == nil { + return nil + } + var err error + lExporter, err = newLoggingExporter(ctx, config) + if err != nil { + return fmt.Errorf("unable to create CloudLogging exporter: %v", err) + } + + registerClientRPCEvents(config, lExporter) + registerServerRPCEvents(config, lExporter) + return nil +} + +func stopLogging() { + internal.ClearGlobalDialOptions() + internal.ClearGlobalServerOptions() + if lExporter != nil { + // This Close() call handles the flushing of the logging buffer. + lExporter.Close() + } +} diff --git a/gcp/observability/logging_test.go b/gcp/observability/logging_test.go new file mode 100644 index 000000000000..5e2bab44be48 --- /dev/null +++ b/gcp/observability/logging_test.go @@ -0,0 +1,1346 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package observability + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "strings" + "sync" + "testing" + + gcplogging "cloud.google.com/go/logging" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/metadata" + + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +func cmpLoggingEntryList(got []*grpcLogEntry, want []*grpcLogEntry) error { + if diff := cmp.Diff(got, want, + // For nondeterministic metadata iteration. + cmp.Comparer(func(a map[string]string, b map[string]string) bool { + if len(a) > len(b) { + a, b = b, a + } + if len(a) == 0 && len(a) != len(b) { // No metadata for one and the other comparator wants metadata. + return false + } + for k, v := range a { + if b[k] != v { + return false + } + } + return true + }), + cmpopts.IgnoreFields(grpcLogEntry{}, "CallID", "Peer"), + cmpopts.IgnoreFields(address{}, "IPPort", "Type"), + cmpopts.IgnoreFields(payload{}, "Timeout")); diff != "" { + return fmt.Errorf("got unexpected grpcLogEntry list, diff (-got, +want): %v", diff) + } + return nil +} + +type fakeLoggingExporter struct { + t *testing.T + + mu sync.Mutex + entries []*grpcLogEntry + + idsSeen []*traceAndSpanIDString +} + +func (fle *fakeLoggingExporter) EmitGcpLoggingEntry(entry gcplogging.Entry) { + fle.mu.Lock() + defer fle.mu.Unlock() + if entry.Severity != 100 { + fle.t.Errorf("entry.Severity is not 100, this should be hardcoded") + } + + ids := &traceAndSpanIDString{ + traceID: entry.Trace, + spanID: entry.SpanID, + isSampled: entry.TraceSampled, + } + fle.idsSeen = append(fle.idsSeen, ids) + + grpcLogEntry, ok := entry.Payload.(*grpcLogEntry) + if !ok { + fle.t.Errorf("payload passed in isn't grpcLogEntry") + } + 
fle.entries = append(fle.entries, grpcLogEntry) +} + +func (fle *fakeLoggingExporter) Close() error { + return nil +} + +// setupObservabilitySystemWithConfig sets up the observability system with the +// specified config, and returns a function which cleans up the observability +// system. +func setupObservabilitySystemWithConfig(cfg *config) (func(), error) { + validConfigJSON, err := json.Marshal(cfg) + if err != nil { + return nil, fmt.Errorf("failed to convert config to JSON: %v", err) + } + oldObservabilityConfig := envconfig.ObservabilityConfig + envconfig.ObservabilityConfig = string(validConfigJSON) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + err = Start(ctx) + cleanup := func() { + End() + envconfig.ObservabilityConfig = oldObservabilityConfig + } + if err != nil { + return cleanup, fmt.Errorf("error in Start: %v", err) + } + return cleanup, nil +} + +// TestClientRPCEventsLogAll tests the observability system configured with a +// client RPC event that logs every call. It performs a Unary and Bidirectional +// Streaming RPC, and expects certain grpcLogEntries to make their way to the +// exporter.
+func (s) TestClientRPCEventsLogAll(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + clientRPCEventLogAllConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(clientRPCEventLogAllConfig) + if err != nil { + t.Fatalf("error setting up observability: %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { + return err + } + if _, err := stream.Recv(); err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + + grpcLogEntriesWant := []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeClientMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: 
"UnaryCall", + SequenceID: 2, + Authority: ss.Address, + Payload: payload{ + Message: []uint8{}, + }, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 4, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 5, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + StatusCode: "OK", + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + + fle.entries = nil + fle.mu.Unlock() + + // Make a streaming RPC. This should cause Log calls on the MethodLogger. 
+ stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("stream.Send() failed: %v", err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("stream.Recv() failed: %v", err) + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("stream.CloseSend()() failed: %v", err) + } + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + grpcLogEntriesWant = []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeClientMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 2, + Authority: ss.Address, + Payload: payload{ + Message: []uint8{}, + }, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 4, + Authority: ss.Address, + }, + { + Type: eventTypeClientHalfClose, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 5, + Authority: ss.Address, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 6, + Payload: payload{ + Metadata: map[string]string{}, + StatusCode: "OK", + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, 
grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.mu.Unlock() +} + +func (s) TestServerRPCEventsLogAll(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + serverRPCEventLogAllConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ServerRPCEvents: []serverRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(serverRPCEventLogAllConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { + return err + } + if _, err := stream.Recv(); err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + grpcLogEntriesWant := []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: 
map[string]string{}, + }, + }, + { + Type: eventTypeClientMessage, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 2, + Authority: ss.Address, + }, + { + Type: eventTypeServerHeader, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 4, + Payload: payload{ + Message: []uint8{}, + }, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 5, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + StatusCode: "OK", + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.entries = nil + fle.mu.Unlock() + + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("stream.Send() failed: %v", err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("stream.Recv() failed: %v", err) + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("stream.CloseSend()() failed: %v", err) + } + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + grpcLogEntriesWant = []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: 
eventTypeClientMessage, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 2, + Authority: ss.Address, + }, + { + Type: eventTypeServerHeader, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 4, + Authority: ss.Address, + Payload: payload{ + Message: []uint8{}, + }, + }, + { + Type: eventTypeClientHalfClose, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 5, + Authority: ss.Address, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerServer, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 6, + Payload: payload{ + Metadata: map[string]string{}, + StatusCode: "OK", + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.mu.Unlock() +} + +// TestBothClientAndServerRPCEvents tests the scenario where you have both +// Client and Server RPC Events configured to log. Both sides should log and +// share the exporter, so the exporter should receive the collective amount of +// calls for both a client stream (corresponding to a Client RPC Event) and a +// server stream (corresponding ot a Server RPC Event). The specificity of the +// entries are tested in previous tests. 
+func (s) TestBothClientAndServerRPCEvents(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + serverRPCEventLogAllConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + ServerRPCEvents: []serverRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + + cleanup, err := setupObservabilitySystemWithConfig(serverRPCEventLogAllConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + // Make a Unary RPC. Both client side and server side streams should log + // entries, which share the same exporter. The exporter should thus receive + // entries from both the client and server streams (the specificity of + // entries is checked in previous tests). 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + fle.mu.Lock() + if len(fle.entries) != 10 { + fle.mu.Unlock() + t.Fatalf("Unexpected length of entries %v, want 10 (collective of client and server)", len(fle.entries)) + } + fle.mu.Unlock() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + fle.mu.Lock() + if len(fle.entries) != 16 { + fle.mu.Unlock() + t.Fatalf("Unexpected length of entries %v, want 16 (collective of client and server)", len(fle.entries)) + } + fle.mu.Unlock() +} + +// TestClientRPCEventsLogAll tests the observability system configured with a +// client RPC event that logs every call and that truncates headers and +// messages. It performs a Unary RPC, and expects events with truncated payloads +// and payloadTruncated set to true, signifying the system properly truncated +// headers and messages logged. 
+func (s) TestClientRPCEventsTruncateHeaderAndMetadata(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + clientRPCEventLogAllConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 10, + MaxMessageBytes: 2, + }, + }, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(clientRPCEventLogAllConfig) + if err != nil { + t.Fatalf("error setting up observability: %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + md := metadata.MD{ + "key1": []string{"value1"}, + "key2": []string{"value2"}, + } + ctx = metadata.NewOutgoingContext(ctx, md) + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: []byte("00000")}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + grpcLogEntriesWant := []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + PayloadTruncated: true, + }, + { + Type: eventTypeClientMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 2, + Authority: ss.Address, + 
Payload: payload{ + MessageLength: 9, + Message: []uint8{ + 0x1a, + 0x07, + }, + }, + PayloadTruncated: true, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 4, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 5, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + StatusCode: "OK", + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + // Only one metadata entry should have been present in logging due to + // truncation. + if mdLen := len(fle.entries[0].Payload.Metadata); mdLen != 1 { + t.Fatalf("Metadata should have only 1 entry due to truncation, got %v", mdLen) + } + fle.mu.Unlock() +} + +// TestPrecedenceOrderingInConfiguration tests the scenario where the logging +// part of observability is configured with three client RPC events, the first +// two on specific methods in the service, the last one for any method within +// the service. This test sends three RPC's, one corresponding to each log +// entry. The logging logic dictated by that specific event should be what is +// used for emission. The second event will specify to exclude logging on RPC's, +// which should generate no log entries if an RPC gets to and matches that +// event. 
+func (s) TestPrecedenceOrderingInConfiguration(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + threeEventsConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"grpc.testing.TestService/UnaryCall"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + { + Methods: []string{"grpc.testing.TestService/EmptyCall"}, + Exclude: true, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + { + Methods: []string{"grpc.testing.TestService/*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + + cleanup, err := setupObservabilitySystemWithConfig(threeEventsConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + // A Unary RPC should match with first event and logs should correspond + // accordingly. The first event it matches to should be used for the + // configuration, even though it could potentially match to events in the + // future. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + grpcLogEntriesWant := []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeClientMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 2, + Authority: ss.Address, + Payload: payload{ + Message: []uint8{}, + }, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 4, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 5, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + StatusCode: "OK", + }, + }, + } + + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.entries = nil + fle.mu.Unlock() + + // A unary empty RPC should match with the second event, which has the exclude + // flag set. Thus, a unary empty RPC should cause no downstream logs. + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("Unexpected error from EmptyCall: %v", err) + } + // The exporter should have received no new log entries due to this call. 
+ fle.mu.Lock() + if len(fle.entries) != 0 { + fle.mu.Unlock() + t.Fatalf("Unexpected length of entries %v, want 0", len(fle.entries)) + } + fle.mu.Unlock() + + // A third RPC, a full duplex call, which doesn't match with first two and + // matches to last one, due to being a wildcard for every method in the + // service, should log accordingly to the last event's logic. + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + grpcLogEntriesWant = []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeClientHalfClose, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + SequenceID: 2, + Authority: ss.Address, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "FullDuplexCall", + Authority: ss.Address, + SequenceID: 3, + Payload: payload{ + Metadata: map[string]string{}, + StatusCode: "OK", + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.mu.Unlock() +} + +func (s) TestTranslateMetadata(t *testing.T) { + concatBinLogValue := base64.StdEncoding.EncodeToString([]byte("value1")) + "," + base64.StdEncoding.EncodeToString([]byte("value2")) + tests := []struct { + name string + binLogMD *binlogpb.Metadata + wantMD map[string]string + }{ + { + name: "two-entries-different-key", + binLogMD: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ + { + Key: "header1", + Value: 
[]byte("value1"), + }, + { + Key: "header2", + Value: []byte("value2"), + }, + }, + }, + wantMD: map[string]string{ + "header1": "value1", + "header2": "value2", + }, + }, + { + name: "two-entries-same-key", + binLogMD: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ + { + Key: "header1", + Value: []byte("value1"), + }, + { + Key: "header1", + Value: []byte("value2"), + }, + }, + }, + wantMD: map[string]string{ + "header1": "value1,value2", + }, + }, + { + name: "two-entries-same-key-bin-header", + binLogMD: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ + { + Key: "header1-bin", + Value: []byte("value1"), + }, + { + Key: "header1-bin", + Value: []byte("value2"), + }, + }, + }, + wantMD: map[string]string{ + "header1-bin": concatBinLogValue, + }, + }, + { + name: "four-entries-two-keys", + binLogMD: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ + { + Key: "header1", + Value: []byte("value1"), + }, + { + Key: "header1", + Value: []byte("value2"), + }, + { + Key: "header1-bin", + Value: []byte("value1"), + }, + { + Key: "header1-bin", + Value: []byte("value2"), + }, + }, + }, + wantMD: map[string]string{ + "header1": "value1,value2", + "header1-bin": concatBinLogValue, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if gotMD := translateMetadata(test.binLogMD); !cmp.Equal(gotMD, test.wantMD) { + t.Fatalf("translateMetadata(%v) = %v, want %v", test.binLogMD, gotMD, test.wantMD) + } + }) + } +} + +func (s) TestMarshalJSON(t *testing.T) { + logEntry := &grpcLogEntry{ + CallID: "300-300-300", + SequenceID: 3, + Type: eventTypeUnknown, + Logger: loggerClient, + Payload: payload{ + Metadata: map[string]string{"header1": "value1"}, + Timeout: 20, + StatusCode: "UNKNOWN", + StatusMessage: "ok", + StatusDetails: []byte("ok"), + MessageLength: 3, + Message: []byte("wow"), + }, + Peer: address{ + Type: ipv4, + Address: "localhost", + IPPort: 16000, + }, + PayloadTruncated: false, + Authority: "server", + 
ServiceName: "grpc-testing", + MethodName: "UnaryRPC", + } + if _, err := json.Marshal(logEntry); err != nil { + t.Fatalf("json.Marshal(%v) failed with error: %v", logEntry, err) + } +} + +// TestMetadataTruncationAccountsKey tests that the metadata truncation takes +// into account both the key and value of metadata. It configures an +// observability system with a maximum byte length for metadata, which is +// greater than just the byte length of the metadata value but less than the +// byte length of the metadata key + metadata value. Thus, in the ClientHeader +// logging event, no metadata should be logged. +func (s) TestMetadataTruncationAccountsKey(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + const mdValue = "value" + configMetadataLimit := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: len(mdValue) + 1, + }, + }, + }, + } + + cleanup, err := setupObservabilitySystemWithConfig(configMetadataLimit) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // the set config MaxMetdataBytes is in between len(mdValue) and len("key") + // + len(mdValue), and thus shouldn't log this metadata entry. 
+ md := metadata.MD{ + "key": []string{mdValue}, + } + ctx = metadata.NewOutgoingContext(ctx, md) + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: []byte("00000")}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + + grpcLogEntriesWant := []*grpcLogEntry{ + { + Type: eventTypeClientHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 1, + Payload: payload{ + Metadata: map[string]string{}, + }, + PayloadTruncated: true, + }, + { + Type: eventTypeClientMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 2, + Authority: ss.Address, + Payload: payload{ + MessageLength: 9, + Message: []uint8{}, + }, + PayloadTruncated: true, + }, + { + Type: eventTypeServerHeader, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 3, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + }, + }, + { + Type: eventTypeServerMessage, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + Authority: ss.Address, + SequenceID: 4, + }, + { + Type: eventTypeServerTrailer, + Logger: loggerClient, + ServiceName: "grpc.testing.TestService", + MethodName: "UnaryCall", + SequenceID: 5, + Authority: ss.Address, + Payload: payload{ + Metadata: map[string]string{}, + StatusCode: "OK", + }, + }, + } + fle.mu.Lock() + if err := cmpLoggingEntryList(fle.entries, grpcLogEntriesWant); err != nil { + fle.mu.Unlock() + t.Fatalf("error in logging entry list comparison %v", err) + } + fle.mu.Unlock() +} + +// TestMethodInConfiguration tests different method names with an expectation on +// whether they should error or not. +func (s) TestMethodInConfiguration(t *testing.T) { + // To skip creating a stackdriver exporter. 
+ fle := &fakeLoggingExporter{ + t: t, + } + + defer func(ne func(ctx context.Context, config *config) (loggingExporter, error)) { + newLoggingExporter = ne + }(newLoggingExporter) + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + tests := []struct { + name string + config *config + wantErr string + }{ + { + name: "leading-slash", + config: &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"/service/method"}, + }, + }, + }, + }, + wantErr: "cannot have a leading slash", + }, + { + name: "wildcard service/method", + config: &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*/method"}, + }, + }, + }, + }, + wantErr: "cannot have service wildcard *", + }, + { + name: "/ in service name", + config: &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"ser/vice/method"}, + }, + }, + }, + }, + wantErr: "only one /", + }, + { + name: "empty method name", + config: &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"service/"}, + }, + }, + }, + }, + wantErr: "method name must be non empty", + }, + { + name: "normal", + config: &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"service/method"}, + }, + }, + }, + }, + wantErr: "", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + cleanup, gotErr := setupObservabilitySystemWithConfig(test.config) + if cleanup != nil { + defer cleanup() + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("Start(%v) = %v, wantErr %v", test.config, gotErr, test.wantErr) + } + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("Start(%v) = %v, wantErr 
%v", test.config, gotErr, test.wantErr) + } + }) + } +} diff --git a/gcp/observability/observability.go b/gcp/observability/observability.go new file mode 100644 index 000000000000..6b7d4b1f762a --- /dev/null +++ b/gcp/observability/observability.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package observability implements the tracing, metrics, and logging data +// collection, and provides controlling knobs via a config file. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package observability + +import ( + "context" + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("observability") + +// Start is the opt-in API for gRPC Observability plugin. This function should +// be invoked in the main function, and before creating any gRPC clients or +// servers, otherwise, they might not be instrumented. At high-level, this +// module does the following: +// +// - it loads observability config from environment; +// - it registers default exporters if not disabled by the config; +// - it sets up telemetry collectors (binary logging sink or StatsHandlers). +// +// Note: this method should only be invoked once. 
+// Note: handle the error +func Start(ctx context.Context) error { + config, err := parseObservabilityConfig() + if err != nil { + return err + } + if config == nil { + return fmt.Errorf("no ObservabilityConfig found") + } + + // Set the project ID if it isn't configured manually. + if err = ensureProjectIDInObservabilityConfig(ctx, config); err != nil { + return err + } + + // Cleanup any created resources this function created in case this function + // errors. + defer func() { + if err != nil { + End() + } + }() + + // Enabling tracing and metrics via OpenCensus + if err = startOpenCensus(config); err != nil { + return fmt.Errorf("failed to instrument OpenCensus: %v", err) + } + + if err = startLogging(ctx, config); err != nil { + return fmt.Errorf("failed to start logging: %v", err) + } + + // Logging is controlled by the config at methods level. + return nil +} + +// End is the clean-up API for gRPC Observability plugin. It is expected to be +// invoked in the main function of the application. The suggested usage is +// "defer observability.End()". This function also flushes data to upstream, and +// cleanup resources. +// +// Note: this method should only be invoked once. +func End() { + stopLogging() + stopOpenCensus() +} diff --git a/gcp/observability/observability_test.go b/gcp/observability/observability_test.go new file mode 100644 index 000000000000..b2030d86b2fa --- /dev/null +++ b/gcp/observability/observability_test.go @@ -0,0 +1,1079 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package observability + +import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/leakcheck" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/metadata" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func init() { + // OpenCensus, once included in binary, will spawn a global goroutine + // recorder that is not controllable by application. + // https://github.com/census-instrumentation/opencensus-go/issues/1191 + leakcheck.RegisterIgnoreGoroutine("go.opencensus.io/stats/view.(*worker).start") + // google-cloud-go leaks HTTP client. 
They are aware of this: + // https://github.com/googleapis/google-cloud-go/issues/1183 + leakcheck.RegisterIgnoreGoroutine("internal/poll.runtime_pollWait") +} + +var ( + defaultTestTimeout = 10 * time.Second + testHeaderMetadata = metadata.MD{"header": []string{"HeADer"}} + testTrailerMetadata = metadata.MD{"trailer": []string{"TrAileR"}} + testOkPayload = []byte{72, 101, 108, 108, 111, 32, 87, 111, 114, 108, 100} + testErrorPayload = []byte{77, 97, 114, 116, 104, 97} + testErrorMessage = "test case injected error" + infinitySizeBytes int32 = 1024 * 1024 * 1024 + defaultRequestCount = 24 +) + +const ( + TypeOpenCensusViewDistribution string = "distribution" + TypeOpenCensusViewCount = "count" + TypeOpenCensusViewSum = "sum" + TypeOpenCensusViewLastValue = "last_value" +) + +type fakeOpenCensusExporter struct { + // The map of the observed View name and type + SeenViews map[string]string + // Number of spans + SeenSpans int + + idCh *testutils.Channel + + t *testing.T + mu sync.RWMutex +} + +func (fe *fakeOpenCensusExporter) ExportView(vd *view.Data) { + fe.mu.Lock() + defer fe.mu.Unlock() + for _, row := range vd.Rows { + fe.t.Logf("Metrics[%s]", vd.View.Name) + switch row.Data.(type) { + case *view.DistributionData: + fe.SeenViews[vd.View.Name] = TypeOpenCensusViewDistribution + case *view.CountData: + fe.SeenViews[vd.View.Name] = TypeOpenCensusViewCount + case *view.SumData: + fe.SeenViews[vd.View.Name] = TypeOpenCensusViewSum + case *view.LastValueData: + fe.SeenViews[vd.View.Name] = TypeOpenCensusViewLastValue + } + } +} + +type traceAndSpanID struct { + spanName string + traceID trace.TraceID + spanID trace.SpanID + isSampled bool + spanKind int +} + +type traceAndSpanIDString struct { + traceID string + spanID string + isSampled bool + // SpanKind is the type of span. + SpanKind int +} + +// idsToString is a helper that converts from generated trace and span IDs to +// the string version stored in trace message events. 
+func (tasi *traceAndSpanID) idsToString(projectID string) traceAndSpanIDString { + return traceAndSpanIDString{ + traceID: "projects/" + projectID + "/traces/" + tasi.traceID.String(), + spanID: tasi.spanID.String(), + isSampled: tasi.isSampled, + SpanKind: tasi.spanKind, + } +} + +func (fe *fakeOpenCensusExporter) ExportSpan(vd *trace.SpanData) { + if fe.idCh != nil { + // This is what export span sees representing the trace/span ID which + // will populate different contexts throughout the system, convert in + // caller to string version as the logging code does. + fe.idCh.Send(traceAndSpanID{ + spanName: vd.Name, + traceID: vd.TraceID, + spanID: vd.SpanID, + isSampled: vd.IsSampled(), + spanKind: vd.SpanKind, + }) + } + + fe.mu.Lock() + defer fe.mu.Unlock() + fe.SeenSpans++ + fe.t.Logf("Span[%v]", vd.Name) +} + +func (fe *fakeOpenCensusExporter) Flush() {} + +func (fe *fakeOpenCensusExporter) Close() error { + return nil +} + +func (s) TestRefuseStartWithInvalidPatterns(t *testing.T) { + invalidConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{":-)"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + invalidConfigJSON, err := json.Marshal(invalidConfig) + if err != nil { + t.Fatalf("failed to convert config to JSON: %v", err) + } + oldObservabilityConfig := envconfig.ObservabilityConfig + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfig = string(invalidConfigJSON) + envconfig.ObservabilityConfigFile = "" + defer func() { + envconfig.ObservabilityConfig = oldObservabilityConfig + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }() + // If there is at least one invalid pattern, which should not be silently tolerated. 
+ if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid patterns not triggering error") + } +} + +// TestRefuseStartWithExcludeAndWildCardAll tests the scenario where an +// observability configuration is provided with client RPC event specifying to +// exclude, and which matches on the '*' wildcard (any). This should cause an +// error when trying to start the observability system. +func (s) TestRefuseStartWithExcludeAndWildCardAll(t *testing.T) { + invalidConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + Exclude: true, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + invalidConfigJSON, err := json.Marshal(invalidConfig) + if err != nil { + t.Fatalf("failed to convert config to JSON: %v", err) + } + oldObservabilityConfig := envconfig.ObservabilityConfig + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfig = string(invalidConfigJSON) + envconfig.ObservabilityConfigFile = "" + defer func() { + envconfig.ObservabilityConfig = oldObservabilityConfig + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }() + // If there is at least one invalid pattern, which should not be silently tolerated. + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid patterns not triggering error") + } +} + +// createTmpConfigInFileSystem creates a random observability config at a random +// place in the temporary portion of the file system dependent on system. It +// also sets the environment variable GRPC_CONFIG_OBSERVABILITY_JSON to point to +// this created config.
+func createTmpConfigInFileSystem(rawJSON string) (func(), error) { + configJSONFile, err := os.CreateTemp(os.TempDir(), "configJSON-") + if err != nil { + return nil, fmt.Errorf("cannot create file %v: %v", configJSONFile.Name(), err) + } + _, err = configJSONFile.Write(json.RawMessage(rawJSON)) + if err != nil { + return nil, fmt.Errorf("cannot write marshalled JSON: %v", err) + } + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfigFile = configJSONFile.Name() + return func() { + configJSONFile.Close() + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }, nil +} + +// TestJSONEnvVarSet tests a valid observability configuration specified by the +// GRPC_CONFIG_OBSERVABILITY_JSON environment variable, whose value represents a +// file path pointing to a JSON encoded config. +func (s) TestJSONEnvVarSet(t *testing.T) { + configJSON := `{ + "project_id": "fake" + }` + cleanup, err := createTmpConfigInFileSystem(configJSON) + defer cleanup() + + if err != nil { + t.Fatalf("failed to create config in file system: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := Start(ctx); err != nil { + t.Fatalf("error starting observability with valid config through file system: %v", err) + } + defer End() +} + +// TestBothConfigEnvVarsSet tests the scenario where both configuration +// environment variables are set. The file system environment variable should +// take precedence, and an error should return in the case of the file system +// configuration being invalid, even if the direct configuration environment +// variable is set and valid. 
+func (s) TestBothConfigEnvVarsSet(t *testing.T) { + invalidConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{":-)"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + invalidConfigJSON, err := json.Marshal(invalidConfig) + if err != nil { + t.Fatalf("failed to convert config to JSON: %v", err) + } + cleanup, err := createTmpConfigInFileSystem(string(invalidConfigJSON)) + defer cleanup() + if err != nil { + t.Fatalf("failed to create config in file system: %v", err) + } + // This configuration should be ignored, as precedence 2. + validConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + validConfigJSON, err := json.Marshal(validConfig) + if err != nil { + t.Fatalf("failed to convert config to JSON: %v", err) + } + oldObservabilityConfig := envconfig.ObservabilityConfig + envconfig.ObservabilityConfig = string(validConfigJSON) + defer func() { + envconfig.ObservabilityConfig = oldObservabilityConfig + }() + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid patterns not triggering error") + } +} + +// TestErrInFileSystemEnvVar tests the scenario where an observability +// configuration is specified with environment variable that specifies a +// location in the file system for configuration, and this location doesn't have +// a file (or valid configuration). 
+func (s) TestErrInFileSystemEnvVar(t *testing.T) { + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfigFile = "/this-file/does-not-exist" + defer func() { + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }() + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid file system path not triggering error") + } +} + +func (s) TestNoEnvSet(t *testing.T) { + oldObservabilityConfig := envconfig.ObservabilityConfig + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfig = "" + envconfig.ObservabilityConfigFile = "" + defer func() { + envconfig.ObservabilityConfig = oldObservabilityConfig + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }() + // If there is no observability config set at all, the Start should return an error. + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid patterns not triggering error") + } +} + +func (s) TestOpenCensusIntegration(t *testing.T) { + defaultMetricsReportingInterval = time.Millisecond * 100 + fe := &fakeOpenCensusExporter{SeenViews: make(map[string]string), t: t} + + defer func(ne func(config *config) (tracingMetricsExporter, error)) { + newExporter = ne + }(newExporter) + + newExporter = func(config *config) (tracingMetricsExporter, error) { + return fe, nil + } + + openCensusOnConfig := &config{ + ProjectID: "fake", + CloudMonitoring: &cloudMonitoring{}, + CloudTrace: &cloudTrace{ + SamplingRate: 1.0, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(openCensusOnConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + for { + _, err := stream.Recv() + if err == 
io.EOF { + return nil + } + } + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + for i := 0; i < defaultRequestCount; i++ { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + } + t.Logf("unary call passed count=%v", defaultRequestCount) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + var errs []error + for ctx.Err() == nil { + errs = nil + fe.mu.RLock() + if value := fe.SeenViews["grpc.io/client/api_latency"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/api_latency: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if value := fe.SeenViews["grpc.io/client/started_rpcs"]; value != TypeOpenCensusViewCount { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/started_rpcs: %s != %s", value, TypeOpenCensusViewCount)) + } + if value := fe.SeenViews["grpc.io/server/started_rpcs"]; value != TypeOpenCensusViewCount { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/started_rpcs: %s != %s", value, TypeOpenCensusViewCount)) + } + + if value := fe.SeenViews["grpc.io/client/completed_rpcs"]; value != TypeOpenCensusViewCount { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/completed_rpcs: %s != %s", value, TypeOpenCensusViewCount)) + } + if value := fe.SeenViews["grpc.io/server/completed_rpcs"]; value != TypeOpenCensusViewCount { + errs = 
append(errs, fmt.Errorf("unexpected type for grpc.io/server/completed_rpcs: %s != %s", value, TypeOpenCensusViewCount)) + } + if value := fe.SeenViews["grpc.io/client/roundtrip_latency"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/completed_rpcs: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if value := fe.SeenViews["grpc.io/server/server_latency"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("grpc.io/server/server_latency: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if value := fe.SeenViews["grpc.io/client/sent_compressed_message_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/sent_compressed_message_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if value := fe.SeenViews["grpc.io/client/received_compressed_message_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/client/received_compressed_message_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if value := fe.SeenViews["grpc.io/server/sent_compressed_message_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/sent_compressed_message_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if value := fe.SeenViews["grpc.io/server/received_compressed_message_bytes_per_rpc"]; value != TypeOpenCensusViewDistribution { + errs = append(errs, fmt.Errorf("unexpected type for grpc.io/server/received_compressed_message_bytes_per_rpc: %s != %s", value, TypeOpenCensusViewDistribution)) + } + if fe.SeenSpans <= 0 { + errs = append(errs, fmt.Errorf("unexpected number of seen spans: %v <= 0", fe.SeenSpans)) + } + fe.mu.RUnlock() + if len(errs) == 0 { + break + } + time.Sleep(100 * time.Millisecond) + } + if len(errs) != 0 
{ + t.Fatalf("Invalid OpenCensus export data: %v", errs) + } +} + +// TestCustomTagsTracingMetrics verifies that the custom tags defined in our +// observability configuration and set to two hardcoded values are passed to the +// function to create an exporter. +func (s) TestCustomTagsTracingMetrics(t *testing.T) { + defer func(ne func(config *config) (tracingMetricsExporter, error)) { + newExporter = ne + }(newExporter) + fe := &fakeOpenCensusExporter{SeenViews: make(map[string]string), t: t} + newExporter = func(config *config) (tracingMetricsExporter, error) { + ct := config.Labels + if len(ct) < 1 { + t.Fatalf("less than 2 custom tags sent in") + } + if val, ok := ct["customtag1"]; !ok || val != "wow" { + t.Fatalf("incorrect custom tag: got %v, want %v", val, "wow") + } + if val, ok := ct["customtag2"]; !ok || val != "nice" { + t.Fatalf("incorrect custom tag: got %v, want %v", val, "nice") + } + return fe, nil + } + + // This configuration present in file system and it's defined custom tags should make it + // to the created exporter. + configJSON := `{ + "project_id": "fake", + "cloud_trace": {}, + "cloud_monitoring": {"sampling_rate": 1.0}, + "labels":{"customtag1":"wow","customtag2":"nice"} + }` + + cleanup, err := createTmpConfigInFileSystem(configJSON) + defer cleanup() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + err = Start(ctx) + defer End() + if err != nil { + t.Fatalf("Start() failed with err: %v", err) + } +} + +// TestStartErrorsThenEnd tests that an End call after Start errors works +// without problems, as this is a possible codepath in the public observability +// API. 
+func (s) TestStartErrorsThenEnd(t *testing.T) { + invalidConfig := &config{ + ProjectID: "fake", + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{":-)"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + } + invalidConfigJSON, err := json.Marshal(invalidConfig) + if err != nil { + t.Fatalf("failed to convert config to JSON: %v", err) + } + oldObservabilityConfig := envconfig.ObservabilityConfig + oldObservabilityConfigFile := envconfig.ObservabilityConfigFile + envconfig.ObservabilityConfig = string(invalidConfigJSON) + envconfig.ObservabilityConfigFile = "" + defer func() { + envconfig.ObservabilityConfig = oldObservabilityConfig + envconfig.ObservabilityConfigFile = oldObservabilityConfigFile + }() + if err := Start(context.Background()); err == nil { + t.Fatalf("Invalid patterns not triggering error") + } + End() +} + +// TestLoggingLinkedWithTraceClientSide tests that client side logs get the +// trace and span id corresponding to the created Call Level Span for the RPC. 
+func (s) TestLoggingLinkedWithTraceClientSide(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + oldNewLoggingExporter := newLoggingExporter + defer func() { + newLoggingExporter = oldNewLoggingExporter + }() + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + idCh := testutils.NewChannel() + + fe := &fakeOpenCensusExporter{ + t: t, + idCh: idCh, + } + oldNewExporter := newExporter + defer func() { + newExporter = oldNewExporter + }() + + newExporter = func(config *config) (tracingMetricsExporter, error) { + return fe, nil + } + + const projectID = "project-id" + tracesAndLogsConfig := &config{ + ProjectID: projectID, + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + CloudTrace: &cloudTrace{ + SamplingRate: 1.0, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(tracesAndLogsConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Spawn a goroutine to receive the trace and span ids received by the + // exporter corresponding to a Unary RPC. 
+ readerErrCh := testutils.NewChannel() + unaryDone := grpcsync.NewEvent() + go func() { + var traceAndSpanIDs []traceAndSpanID + val, err := idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok := val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + <-unaryDone.Done() + var tasiSent traceAndSpanIDString + for _, tasi := range traceAndSpanIDs { + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindClient { + tasiSent = tasi.idsToString(projectID) + continue + } + } + + fle.mu.Lock() + for _, tasiSeen := range fle.idsSeen { + if diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff != "" { + readerErrCh.Send(fmt.Errorf("got unexpected id, should be a client span (-got, +want): %v", diff)) + } + } + + fle.entries = nil + fle.mu.Unlock() + readerErrCh.Send(nil) + }() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + unaryDone.Fire() + if chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have 
received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } +} + +// TestLoggingLinkedWithTraceServerSide tests that server side logs get the +// trace and span id corresponding to the created Server Span for the RPC. +func (s) TestLoggingLinkedWithTraceServerSide(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + oldNewLoggingExporter := newLoggingExporter + defer func() { + newLoggingExporter = oldNewLoggingExporter + }() + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + idCh := testutils.NewChannel() + + fe := &fakeOpenCensusExporter{ + t: t, + idCh: idCh, + } + oldNewExporter := newExporter + defer func() { + newExporter = oldNewExporter + }() + + newExporter = func(config *config) (tracingMetricsExporter, error) { + return fe, nil + } + + const projectID = "project-id" + tracesAndLogsConfig := &config{ + ProjectID: projectID, + CloudLogging: &cloudLogging{ + ServerRPCEvents: []serverRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + CloudTrace: &cloudTrace{ + SamplingRate: 1.0, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(tracesAndLogsConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Spawn a 
goroutine to receive the trace and span ids received by the + // exporter corresponding to a Unary RPC. + readerErrCh := testutils.NewChannel() + unaryDone := grpcsync.NewEvent() + go func() { + var traceAndSpanIDs []traceAndSpanID + val, err := idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok := val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + <-unaryDone.Done() + var tasiServer traceAndSpanIDString + for _, tasi := range traceAndSpanIDs { + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindServer { + tasiServer = tasi.idsToString(projectID) + continue + } + } + + fle.mu.Lock() + for _, tasiSeen := range fle.idsSeen { + if diff := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff != "" { + readerErrCh.Send(fmt.Errorf("got unexpected id, should be a server span (-got, +want): %v", diff)) + } + } + + fle.entries = nil + fle.mu.Unlock() + readerErrCh.Send(nil) + }() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + unaryDone.Fire() + if 
chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } +} + +// TestLoggingLinkedWithTrace tests that client and server side logs get the +// trace and span id corresponding to either the Call Level Span or Server Span +// (no determinism, so can only assert one or the other), for Unary and +// Streaming RPCs. +func (s) TestLoggingLinkedWithTrace(t *testing.T) { + fle := &fakeLoggingExporter{ + t: t, + } + oldNewLoggingExporter := newLoggingExporter + defer func() { + newLoggingExporter = oldNewLoggingExporter + }() + + newLoggingExporter = func(ctx context.Context, config *config) (loggingExporter, error) { + return fle, nil + } + + idCh := testutils.NewChannel() + + fe := &fakeOpenCensusExporter{ + t: t, + idCh: idCh, + } + oldNewExporter := newExporter + defer func() { + newExporter = oldNewExporter + }() + + newExporter = func(config *config) (tracingMetricsExporter, error) { + return fe, nil + } + + const projectID = "project-id" + tracesAndLogsConfig := &config{ + ProjectID: projectID, + CloudLogging: &cloudLogging{ + ClientRPCEvents: []clientRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + ServerRPCEvents: []serverRPCEvents{ + { + Methods: []string{"*"}, + MaxMetadataBytes: 30, + MaxMessageBytes: 30, + }, + }, + }, + CloudTrace: &cloudTrace{ + SamplingRate: 1.0, + }, + } + cleanup, err := setupObservabilitySystemWithConfig(tracesAndLogsConfig) + if err != nil { + t.Fatalf("error setting up observability %v", err) + } + defer cleanup() + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error 
{ + _, err := stream.Recv() + if err != io.EOF { + return err + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Spawn a goroutine to receive the trace and span ids received by the + // exporter corresponding to a Unary RPC. + readerErrCh := testutils.NewChannel() + unaryDone := grpcsync.NewEvent() + go func() { + var traceAndSpanIDs []traceAndSpanID + val, err := idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok := val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + <-unaryDone.Done() + var tasiSent traceAndSpanIDString + var tasiServer traceAndSpanIDString + for _, tasi := range traceAndSpanIDs { + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindClient { + tasiSent = tasi.idsToString(projectID) + continue + } + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindServer { + tasiServer = tasi.idsToString(projectID) + } + } + + fle.mu.Lock() + for _, tasiSeen := range fle.idsSeen { + if diff := cmp.Diff(tasiSeen, &tasiSent, 
cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff != "" { + if diff2 := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff2 != "" { + readerErrCh.Send(fmt.Errorf("got unexpected id, should be a client or server span (-got, +want): %v, %v", diff, diff2)) + } + } + } + + fle.entries = nil + fle.mu.Unlock() + readerErrCh.Send(nil) + }() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{Body: testOkPayload}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + unaryDone.Fire() + if chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } + + fle.mu.Lock() + fle.idsSeen = nil + fle.mu.Unlock() + + // Test streaming. Spawn a goroutine to receive the trace and span ids + // received by the exporter corresponding to a streaming RPC. 
+ readerErrCh = testutils.NewChannel() + streamDone := grpcsync.NewEvent() + go func() { + var traceAndSpanIDs []traceAndSpanID + + val, err := idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok := val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + + val, err = idCh.Receive(ctx) + if err != nil { + readerErrCh.Send(fmt.Errorf("error while waiting for IDs: %v", err)) + } + tasi, ok = val.(traceAndSpanID) + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", val)) + } + traceAndSpanIDs = append(traceAndSpanIDs, tasi) + <-streamDone.Done() + var tasiSent traceAndSpanIDString + var tasiServer traceAndSpanIDString + for _, tasi := range traceAndSpanIDs { + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindClient { + tasiSent = tasi.idsToString(projectID) + continue + } + if strings.HasPrefix(tasi.spanName, "grpc.") && tasi.spanKind == trace.SpanKindServer { + tasiServer = tasi.idsToString(projectID) + } + } + + fle.mu.Lock() + for _, tasiSeen := range fle.idsSeen { + if diff := cmp.Diff(tasiSeen, &tasiSent, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff != "" { + if diff2 := cmp.Diff(tasiSeen, &tasiServer, cmp.AllowUnexported(traceAndSpanIDString{}), cmpopts.IgnoreFields(traceAndSpanIDString{}, "SpanKind")); diff2 != "" { + readerErrCh.Send(fmt.Errorf("got unexpected id, should be a client or server span (-got, +want): %v, %v", diff, diff2)) + } + } + } + + fle.entries = nil + 
fle.mu.Unlock() + readerErrCh.Send(nil) + }() + + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + streamDone.Fire() + + if chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } +} diff --git a/gcp/observability/opencensus.go b/gcp/observability/opencensus.go new file mode 100644 index 000000000000..abd3cb55b803 --- /dev/null +++ b/gcp/observability/opencensus.go @@ -0,0 +1,179 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package observability + +import ( + "fmt" + "os" + "strconv" + "time" + + "contrib.go.opencensus.io/exporter/stackdriver" + "contrib.go.opencensus.io/exporter/stackdriver/monitoredresource" + + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" + "google.golang.org/grpc" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats/opencensus" +) + +var ( + // It's a variable instead of const to speed up testing + defaultMetricsReportingInterval = time.Second * 30 + defaultViews = []*view.View{ + opencensus.ClientStartedRPCsView, + opencensus.ClientCompletedRPCsView, + opencensus.ClientRoundtripLatencyView, + opencensus.ClientSentCompressedMessageBytesPerRPCView, + opencensus.ClientReceivedCompressedMessageBytesPerRPCView, + opencensus.ClientAPILatencyView, + opencensus.ServerStartedRPCsView, + opencensus.ServerCompletedRPCsView, + opencensus.ServerSentCompressedMessageBytesPerRPCView, + opencensus.ServerReceivedCompressedMessageBytesPerRPCView, + opencensus.ServerLatencyView, + } +) + +func labelsToMonitoringLabels(labels map[string]string) *stackdriver.Labels { + sdLabels := &stackdriver.Labels{} + for k, v := range labels { + sdLabels.Set(k, v, "") + } + return sdLabels +} + +func labelsToTraceAttributes(labels map[string]string) map[string]interface{} { + ta := make(map[string]interface{}, len(labels)) + for k, v := range labels { + ta[k] = v + } + return ta +} + +type tracingMetricsExporter interface { + trace.Exporter + view.Exporter + Flush() + Close() error +} + +var exporter tracingMetricsExporter + +var newExporter = newStackdriverExporter + +func newStackdriverExporter(config *config) (tracingMetricsExporter, error) { + // Create the Stackdriver exporter, which is shared between tracing and stats + mr := monitoredresource.Autodetect() + logger.Infof("Detected MonitoredResource:: %+v", mr) + var err error + // Custom labels completely overwrite any labels generated in the OpenCensus + // library, including their label that uniquely
identifies the process. + // Thus, generate a unique process identifier here to uniquely identify + // process for metrics exporting to function correctly. + metricsLabels := make(map[string]string, len(config.Labels)+1) + for k, v := range config.Labels { + metricsLabels[k] = v + } + metricsLabels["opencensus_task"] = generateUniqueProcessIdentifier() + exporter, err := stackdriver.NewExporter(stackdriver.Options{ + ProjectID: config.ProjectID, + MonitoredResource: mr, + DefaultMonitoringLabels: labelsToMonitoringLabels(metricsLabels), + DefaultTraceAttributes: labelsToTraceAttributes(config.Labels), + MonitoringClientOptions: cOptsDisableLogTrace, + TraceClientOptions: cOptsDisableLogTrace, + }) + if err != nil { + return nil, fmt.Errorf("failed to create Stackdriver exporter: %v", err) + } + return exporter, nil +} + +// generateUniqueProcessIdentifier returns a unique process identifier for the +// process this code is running in. This is the same way the OpenCensus library +// generates the unique process identifier, in the format of +// "go-@". +func generateUniqueProcessIdentifier() string { + hostname, err := os.Hostname() + if err != nil { + hostname = "localhost" + } + return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname +} + +// This method accepts config and exporter; the exporter argument is exposed to +// assist unit testing of the OpenCensus behavior. +func startOpenCensus(config *config) error { + // If both tracing and metrics are disabled, there's no point inject default + // StatsHandler. 
+ if config == nil || (config.CloudTrace == nil && config.CloudMonitoring == nil) { + return nil + } + + var err error + exporter, err = newExporter(config) + if err != nil { + return err + } + + var to opencensus.TraceOptions + if config.CloudTrace != nil { + to.TS = trace.ProbabilitySampler(config.CloudTrace.SamplingRate) + trace.RegisterExporter(exporter.(trace.Exporter)) + logger.Infof("Start collecting and exporting trace spans with global_trace_sampling_rate=%.2f", config.CloudTrace.SamplingRate) + } + + if config.CloudMonitoring != nil { + if err := view.Register(defaultViews...); err != nil { + return fmt.Errorf("failed to register observability views: %v", err) + } + view.SetReportingPeriod(defaultMetricsReportingInterval) + view.RegisterExporter(exporter.(view.Exporter)) + logger.Infof("Start collecting and exporting metrics") + } + + internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(opencensus.ServerOption(to)) + internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(opencensus.DialOption(to)) + logger.Infof("Enabled OpenCensus StatsHandlers for clients and servers") + + return nil +} + +// stopOpenCensus flushes the exporter's and cleans up globals across all +// packages if exporter was created. +func stopOpenCensus() { + if exporter != nil { + internal.ClearGlobalDialOptions() + internal.ClearGlobalServerOptions() + // This Unregister call guarantees the data recorded gets sent to + // exporter, synchronising the view package and exporter. Doesn't matter + // if views not registered, will be a noop if not registered. + view.Unregister(defaultViews...) + // Call these unconditionally, doesn't matter if not registered, will be + // a noop if not registered. + trace.UnregisterExporter(exporter) + view.UnregisterExporter(exporter) + + // This Flush call makes sure recorded telemetry get sent to backend. 
+ exporter.Flush() + exporter.Close() + } +} diff --git a/go.mod b/go.mod index b177cfa66df5..acd6f919f793 100644 --- a/go.mod +++ b/go.mod @@ -1,17 +1,31 @@ module google.golang.org/grpc -go 1.11 +go 1.17 require ( - github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 - github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/protobuf v1.4.2 - github.com/google/go-cmp v0.5.0 - github.com/google/uuid v1.1.2 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/protobuf v1.25.0 + github.com/cespare/xxhash/v2 v2.2.0 + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 + github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f + github.com/golang/glog v1.1.0 + github.com/golang/protobuf v1.5.3 + github.com/google/go-cmp v0.5.9 + github.com/google/uuid v1.3.0 + golang.org/x/net v0.9.0 + golang.org/x/oauth2 v0.7.0 + golang.org/x/sync v0.0.0-20190423024810-112230192c58 + golang.org/x/sys v0.7.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 + google.golang.org/protobuf v1.30.0 +) + +require ( + cloud.google.com/go/compute v1.19.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + 
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect ) diff --git a/go.sum b/go.sum index bb25cd49156d..98a106b2a17c 100644 --- a/go.sum +++ b/go.sum @@ -1,49 +1,46 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 
h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod 
h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf 
v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -52,48 +49,50 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= 
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/grpclog/loggerv2.go b/grpclog/loggerv2.go index 4ee33171e008..5de66e40d365 100644 --- a/grpclog/loggerv2.go +++ b/grpclog/loggerv2.go @@ -19,11 +19,13 @@ package grpclog import ( + "encoding/json" + "fmt" "io" - "io/ioutil" "log" "os" "strconv" + "strings" "google.golang.org/grpc/internal/grpclog" ) @@ -95,8 +97,9 @@ var severityName = []string{ // loggerT is the default logger used by grpclog. type loggerT struct { - m []*log.Logger - v int + m []*log.Logger + v int + jsonFormat bool } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -105,27 +108,40 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. 
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) +} + +type loggerV2Config struct { + verbose int + jsonFormat bool +} + +func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { var m []*log.Logger - m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + flag := log.LstdFlags + if c.jsonFormat { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) - m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) - return &loggerT{m: m, v: v} + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} } // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. 
func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { @@ -142,58 +158,79 @@ func newLoggerV2() LoggerV2 { if vl, err := strconv.Atoi(vLevel); err == nil { v = vl } - return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) + + jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") + + return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ + verbose: v, + jsonFormat: jsonFormat, + }) +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) } func (g *loggerT) Info(args ...interface{}) { - g.m[infoLog].Print(args...) + g.output(infoLog, fmt.Sprint(args...)) } func (g *loggerT) Infoln(args ...interface{}) { - g.m[infoLog].Println(args...) + g.output(infoLog, fmt.Sprintln(args...)) } func (g *loggerT) Infof(format string, args ...interface{}) { - g.m[infoLog].Printf(format, args...) + g.output(infoLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Warning(args ...interface{}) { - g.m[warningLog].Print(args...) + g.output(warningLog, fmt.Sprint(args...)) } func (g *loggerT) Warningln(args ...interface{}) { - g.m[warningLog].Println(args...) + g.output(warningLog, fmt.Sprintln(args...)) } func (g *loggerT) Warningf(format string, args ...interface{}) { - g.m[warningLog].Printf(format, args...) + g.output(warningLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Error(args ...interface{}) { - g.m[errorLog].Print(args...) 
+ g.output(errorLog, fmt.Sprint(args...)) } func (g *loggerT) Errorln(args ...interface{}) { - g.m[errorLog].Println(args...) + g.output(errorLog, fmt.Sprintln(args...)) } func (g *loggerT) Errorf(format string, args ...interface{}) { - g.m[errorLog].Printf(format, args...) + g.output(errorLog, fmt.Sprintf(format, args...)) } func (g *loggerT) Fatal(args ...interface{}) { - g.m[fatalLog].Fatal(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) } func (g *loggerT) Fatalln(args ...interface{}) { - g.m[fatalLog].Fatalln(args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) } func (g *loggerT) Fatalf(format string, args ...interface{}) { - g.m[fatalLog].Fatalf(format, args...) - // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) } func (g *loggerT) V(l int) bool { @@ -204,18 +241,18 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. 
WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/grpclog/loggerv2_test.go b/grpclog/loggerv2_test.go index 756f215f9c86..119cea4c6ecd 100644 --- a/grpclog/loggerv2_test.go +++ b/grpclog/loggerv2_test.go @@ -52,9 +52,10 @@ func TestLoggerV2Severity(t *testing.T) { } // check if b is in the format of: -// WARNING: 2017/04/07 14:55:42 WARNING +// +// 2017/04/07 14:55:42 WARNING: WARNING func checkLogForSeverity(s int, b []byte) error { - expected := regexp.MustCompile(fmt.Sprintf(`^%s: [0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} %s\n$`, severityName[s], severityName[s])) + expected := regexp.MustCompile(fmt.Sprintf(`^[0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} %s: %s\n$`, severityName[s], severityName[s])) if m := expected.Match(b); !m { return fmt.Errorf("got: %v, want string in format of: %v", string(b), severityName[s]+": 2016/10/05 17:09:26 "+severityName[s]) } diff --git a/health/grpc_health_v1/health.pb.go b/health/grpc_health_v1/health.pb.go index a66024d23e30..142d35f753e9 100644 --- a/health/grpc_health_v1/health.pb.go +++ b/health/grpc_health_v1/health.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/health/v1/health.proto package grpc_health_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HealthCheckResponse_ServingStatus int32 const ( diff --git a/health/grpc_health_v1/health_grpc.pb.go b/health/grpc_health_v1/health_grpc.pb.go index 386d16ce62d1..a01a1b4d54bd 100644 --- a/health/grpc_health_v1/health_grpc.pb.go +++ b/health/grpc_health_v1/health_grpc.pb.go @@ -1,4 +1,25 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/health/v1/health.proto package grpc_health_v1 @@ -14,6 +35,11 @@ import ( // Requires gRPC-Go v1.32.0 or later. 
const _ = grpc.SupportPackageIsVersion7 +const ( + Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" +) + // HealthClient is the client API for Health service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -49,7 +75,7 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) + err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -57,7 +83,7 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . } func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...) if err != nil { return nil, err } @@ -145,7 +171,7 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.health.v1.Health/Check", + FullMethod: Health_Check_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) diff --git a/idle.go b/idle.go new file mode 100644 index 000000000000..dc3dc72f6b09 --- /dev/null +++ b/idle.go @@ -0,0 +1,287 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type idlenessEnforcer interface { + exitIdleMode() error + enterIdleMode() error +} + +// idlenessManager defines the functionality required to track RPC activity on a +// channel. +type idlenessManager interface { + onCallBegin() error + onCallEnd() + close() +} + +type noopIdlenessManager struct{} + +func (noopIdlenessManager) onCallBegin() error { return nil } +func (noopIdlenessManager) onCallEnd() {} +func (noopIdlenessManager) close() {} + +// idlenessManagerImpl implements the idlenessManager interface. It uses atomic +// operations to synchronize access to shared state and a mutex to guarantee +// mutual exclusion in a critical section. +type idlenessManagerImpl struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. 
+ enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and onCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// newIdlenessManager creates a new idleness manager implementation for the +// given idle timeout. +func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { + if idleTimeout == 0 { + return noopIdlenessManager{} + } + + i := &idlenessManagerImpl{ + enforcer: enforcer, + timeout: int64(idleTimeout), + } + i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) + return i +} + +// resetIdleTimer resets the idle timer to the given duration. This method +// should only be called from the timer callback. +func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if i.timer == nil { + // Only close sets timer to nil. We are done. + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. + i.timer.Reset(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. 
The channel is considered inactive if there are no
+// ongoing calls and no RPC activity since the last time the timer fired.
+func (i *idlenessManagerImpl) handleIdleTimeout() {
+	if i.isClosed() {
+		return
+	}
+
+	if atomic.LoadInt32(&i.activeCallsCount) > 0 {
+		i.resetIdleTimer(time.Duration(i.timeout))
+		return
+	}
+
+	// There has been activity on the channel since we last got here. Reset the
+	// timer and return.
+	if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
+		// Set the timer to fire after a duration of idle timeout, calculated
+		// from the time the most recent RPC completed.
+		atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0)
+		i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano()))
+		return
+	}
+
+	// This CAS operation is extremely likely to succeed given that there has
+	// been no activity since the last time we were here. Setting the
+	// activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the
+	// channel is either in idle mode or is trying to get there.
+	if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) {
+		// This CAS operation can fail if an RPC started after we checked for
+		// activity at the top of this method, or one was ongoing from before
+		// the last time we were here. In both cases, reset the timer and return.
+		i.resetIdleTimer(time.Duration(i.timeout))
+		return
+	}
+
+	// Now that we've set the active calls count to -math.MaxInt32, it's time to
+	// actually move to idle mode.
+	if i.tryEnterIdleMode() {
+		// Successfully entered idle mode. No timer needed until we exit idle.
+		return
+	}
+
+	// Failed to enter idle mode due to a concurrent RPC that kept the channel
+	// active, or because of an error from the channel. Undo the attempt to
+	// enter idle, and reset the timer to try again later.
+	atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
+	i.resetIdleTimer(time.Duration(i.timeout))
+}
+
+// tryEnterIdleMode instructs the channel to enter idle mode. But before
+// that, it performs a last minute check to ensure that no new RPC has come in,
+// making the channel active.
+//
+// Return value indicates whether or not the channel moved to idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with exitIdleMode.
+func (i *idlenessManagerImpl) tryEnterIdleMode() bool {
+	i.idleMu.Lock()
+	defer i.idleMu.Unlock()
+
+	if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 {
+		// We raced and lost to a new RPC. Very rare, but stop entering idle.
+		return false
+	}
+	if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
+		// A very short RPC could have come in (and also finished) after we
+		// checked for calls count and activity in handleIdleTimeout(), but
+		// before the CAS operation. So, we need to check for activity again.
+		return false
+	}
+
+	// No new RPCs have come in since we last set the active calls count value
+	// -math.MaxInt32 in the timer callback. And since we have the lock, it is
+	// safe to enter idle mode now.
+	if err := i.enforcer.enterIdleMode(); err != nil {
+		logger.Errorf("Failed to enter idle mode: %v", err)
+		return false
+	}
+
+	// Successfully entered idle mode.
+	i.actuallyIdle = true
+	return true
+}
+
+// onCallBegin is invoked at the start of every RPC.
+func (i *idlenessManagerImpl) onCallBegin() error {
+	if i.isClosed() {
+		return nil
+	}
+
+	if atomic.AddInt32(&i.activeCallsCount, 1) > 0 {
+		// Channel is not idle now. Set the activity bit and allow the call.
+		atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
+		return nil
+	}
+
+	// Channel is either in idle mode or is in the process of moving to idle
+	// mode. Attempt to exit idle mode to allow this RPC.
+	if err := i.exitIdleMode(); err != nil {
+		// Undo the increment to calls count, and return an error causing the
+		// RPC to fail.
+		atomic.AddInt32(&i.activeCallsCount, -1)
+		return err
+	}
+
+	atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
+	return nil
+}
+
+// exitIdleMode instructs the channel to exit idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+func (i *idlenessManagerImpl) exitIdleMode() error {
+	i.idleMu.Lock()
+	defer i.idleMu.Unlock()
+
+	if !i.actuallyIdle {
+		// This can happen in two scenarios:
+		// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
+		//   tryEnterIdleMode(). But before the latter could grab the lock, an RPC
+		//   came in and onCallBegin() noticed that the calls count is negative.
+		// - Channel is in idle mode, and multiple new RPCs come in at the same
+		//   time, all of them notice a negative calls count in onCallBegin and get
+		//   here. The first one to get the lock would get the channel to exit idle.
+		//
+		// Either way, nothing to do here.
+		return nil
+	}
+
+	if err := i.enforcer.exitIdleMode(); err != nil {
+		return fmt.Errorf("channel failed to exit idle mode: %v", err)
+	}
+
+	// Undo the idle entry process. This also respects any new RPC attempts.
+	atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
+	i.actuallyIdle = false
+
+	// Start a new timer to fire after the configured idle timeout.
+	i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout)
+	return nil
+}
+
+// onCallEnd is invoked at the end of every RPC.
+func (i *idlenessManagerImpl) onCallEnd() {
+	if i.isClosed() {
+		return
+	}
+
+	// Record the time at which the most recent call finished.
+	atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano())
+
+	// Decrement the active calls count. This count can temporarily go negative
+	// when the timer callback is in the process of moving the channel to idle
+	// mode, but one or more RPCs come in and complete before the timer callback
+	// can get done with the process of moving to idle mode.
+ atomic.AddInt32(&i.activeCallsCount, -1) +} + +func (i *idlenessManagerImpl) isClosed() bool { + return atomic.LoadInt32(&i.closed) == 1 +} + +func (i *idlenessManagerImpl) close() { + atomic.StoreInt32(&i.closed, 1) + + i.idleMu.Lock() + i.timer.Stop() + i.timer = nil + i.idleMu.Unlock() +} diff --git a/idle_test.go b/idle_test.go new file mode 100644 index 000000000000..a20b4e09947b --- /dev/null +++ b/idle_test.go @@ -0,0 +1,360 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" +) + +const ( + defaultTestIdleTimeout = 500 * time.Millisecond // A short idle_timeout for tests. + defaultTestShortTimeout = 10 * time.Millisecond // A small deadline to wait for events expected to not happen. 
+) + +type testIdlenessEnforcer struct { + exitIdleCh chan struct{} + enterIdleCh chan struct{} +} + +func (ti *testIdlenessEnforcer) exitIdleMode() error { + ti.exitIdleCh <- struct{}{} + return nil + +} + +func (ti *testIdlenessEnforcer) enterIdleMode() error { + ti.enterIdleCh <- struct{}{} + return nil + +} + +func newTestIdlenessEnforcer() *testIdlenessEnforcer { + return &testIdlenessEnforcer{ + exitIdleCh: make(chan struct{}, 1), + enterIdleCh: make(chan struct{}, 1), + } +} + +// overrideNewTimer overrides the new timer creation function by ensuring that a +// message is pushed on the returned channel everytime the timer fires. +func overrideNewTimer(t *testing.T) <-chan struct{} { + t.Helper() + + ch := make(chan struct{}, 1) + origTimeAfterFunc := timeAfterFunc + timeAfterFunc = func(d time.Duration, callback func()) *time.Timer { + return time.AfterFunc(d, func() { + select { + case ch <- struct{}{}: + default: + } + callback() + }) + } + t.Cleanup(func() { timeAfterFunc = origTimeAfterFunc }) + return ch +} + +// TestIdlenessManager_Disabled tests the case where the idleness manager is +// disabled by passing an idle_timeout of 0. Verifies the following things: +// - timer callback does not fire +// - an RPC does not trigger a call to exitIdleMode on the ClientConn +// - more calls to RPC termination (as compared to RPC initiation) does not +// result in an error log +func (s) TestIdlenessManager_Disabled(t *testing.T) { + callbackCh := overrideNewTimer(t) + + // Create an idleness manager that is disabled because of idleTimeout being + // set to `0`. + enforcer := newTestIdlenessEnforcer() + mgr := newIdlenessManager(enforcer, time.Duration(0)) + + // Ensure that the timer callback does not fire within a short deadline. 
+	select {
+	case <-callbackCh:
+		t.Fatal("Idle timer callback fired when manager is disabled")
+	case <-time.After(defaultTestShortTimeout):
+	}
+
+	// The first invocation of onCallBegin() would lead to a call to
+	// exitIdleMode() on the enforcer, unless the idleness manager is disabled.
+	mgr.onCallBegin()
+	select {
+	case <-enforcer.exitIdleCh:
+		t.Fatalf("exitIdleMode() called on enforcer when manager is disabled")
+	case <-time.After(defaultTestShortTimeout):
+	}
+
+	// If the number of calls to onCallEnd() exceeds the number of calls to
+	// onCallBegin(), the idleness manager is expected to throw an error log
+	// (which will cause our TestLogger to fail the test). But since the manager
+	// is disabled, this should not happen.
+	mgr.onCallEnd()
+	mgr.onCallEnd()
+
+	// The idleness manager is explicitly not closed here. But since the manager
+	// is disabled, it will not start the run goroutine, and hence we expect the
+	// leakchecker to not find any leaked goroutines.
+}
+
+// TestIdlenessManager_Enabled_TimerFires tests the case where the idle manager
+// is enabled. Ensures that when there are no RPCs, the timer callback is
+// invoked and the enterIdleMode() method is invoked on the enforcer.
+func (s) TestIdlenessManager_Enabled_TimerFires(t *testing.T) {
+	callbackCh := overrideNewTimer(t)
+
+	enforcer := newTestIdlenessEnforcer()
+	mgr := newIdlenessManager(enforcer, time.Duration(defaultTestIdleTimeout))
+	defer mgr.close()
+
+	// Ensure that the timer callback fires within an appropriate amount of time.
+	select {
+	case <-callbackCh:
+	case <-time.After(2 * defaultTestIdleTimeout):
+		t.Fatal("Timeout waiting for idle timer callback to fire")
+	}
+
+	// Ensure that the channel moves to idle mode eventually.
+ select { + case <-enforcer.enterIdleCh: + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout waiting for channel to move to idle") + } +} + +// TestIdlenessManager_Enabled_OngoingCall tests the case where the idle manager +// is enabled. Ensures that when there is an ongoing RPC, the channel does not +// enter idle mode. +func (s) TestIdlenessManager_Enabled_OngoingCall(t *testing.T) { + callbackCh := overrideNewTimer(t) + + enforcer := newTestIdlenessEnforcer() + mgr := newIdlenessManager(enforcer, time.Duration(defaultTestIdleTimeout)) + defer mgr.close() + + // Fire up a goroutine that simulates an ongoing RPC that is terminated + // after the timer callback fires for the first time. + timerFired := make(chan struct{}) + go func() { + mgr.onCallBegin() + <-timerFired + mgr.onCallEnd() + }() + + // Ensure that the timer callback fires and unblock the above goroutine. + select { + case <-callbackCh: + close(timerFired) + case <-time.After(2 * defaultTestIdleTimeout): + t.Fatal("Timeout waiting for idle timer callback to fire") + } + + // The invocation of the timer callback should not put the channel in idle + // mode since we had an ongoing RPC. + select { + case <-enforcer.enterIdleCh: + t.Fatalf("enterIdleMode() called on enforcer when active RPC exists") + case <-time.After(defaultTestShortTimeout): + } + + // Since we terminated the ongoing RPC and we have no other active RPCs, the + // channel must move to idle eventually. + select { + case <-enforcer.enterIdleCh: + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout waiting for channel to move to idle") + } +} + +// TestIdlenessManager_Enabled_ActiveSinceLastCheck tests the case where the +// idle manager is enabled. Ensures that when there are active RPCs in the last +// period (even though there is no active call when the timer fires), the +// channel does not enter idle mode. 
+func (s) TestIdlenessManager_Enabled_ActiveSinceLastCheck(t *testing.T) {
+	callbackCh := overrideNewTimer(t)
+
+	enforcer := newTestIdlenessEnforcer()
+	mgr := newIdlenessManager(enforcer, time.Duration(defaultTestIdleTimeout))
+	defer mgr.close()
+
+	// Fire up a goroutine that simulates unary RPCs until the timer callback
+	// fires.
+	timerFired := make(chan struct{})
+	go func() {
+		for ; ; <-time.After(defaultTestShortTimeout) {
+			mgr.onCallBegin()
+			mgr.onCallEnd()
+
+			select {
+			case <-timerFired:
+				return
+			default:
+			}
+		}
+	}()
+
+	// Ensure that the timer callback fires, and that we don't enter idle as
+	// part of this invocation of the timer callback, since we had some RPCs in
+	// this period.
+	select {
+	case <-callbackCh:
+		close(timerFired)
+	case <-time.After(2 * defaultTestIdleTimeout):
+		t.Fatal("Timeout waiting for idle timer callback to fire")
+	}
+	select {
+	case <-enforcer.enterIdleCh:
+		t.Fatalf("enterIdleMode() called on enforcer when one RPC completed in the last period")
+	case <-time.After(defaultTestShortTimeout):
+	}
+
+	// Since the unary RPC terminated and we have no other active RPCs, the
+	// channel must move to idle eventually.
+	select {
+	case <-enforcer.enterIdleCh:
+	case <-time.After(defaultTestTimeout):
+		t.Fatal("Timeout waiting for channel to move to idle")
+	}
+}
+
+// TestIdlenessManager_Enabled_ExitIdleOnRPC tests the case where the idle
+// manager is enabled. Ensures that the channel moves out of idle when an RPC is
+// initiated.
+func (s) TestIdlenessManager_Enabled_ExitIdleOnRPC(t *testing.T) {
+	overrideNewTimer(t)
+
+	enforcer := newTestIdlenessEnforcer()
+	mgr := newIdlenessManager(enforcer, time.Duration(defaultTestIdleTimeout))
+	defer mgr.close()
+
+	// Ensure that the channel moves to idle since there are no RPCs.
+	select {
+	case <-enforcer.enterIdleCh:
+	case <-time.After(2 * defaultTestIdleTimeout):
+		t.Fatal("Timeout waiting for channel to move to idle mode")
+	}
+
+	for i := 0; i < 100; i++ {
+		// A call to onCallBegin and onCallEnd simulates an RPC.
+		go func() {
+			if err := mgr.onCallBegin(); err != nil {
+				t.Errorf("onCallBegin() failed: %v", err)
+			}
+			mgr.onCallEnd()
+		}()
+	}
+
+	// Ensure that the channel moves out of idle as a result of the above RPC.
+	select {
+	case <-enforcer.exitIdleCh:
+	case <-time.After(2 * defaultTestIdleTimeout):
+		t.Fatal("Timeout waiting for channel to move out of idle mode")
+	}
+
+	// Ensure that only one call to exit idle mode is made to the CC.
+	sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
+	defer sCancel()
+	select {
+	case <-enforcer.exitIdleCh:
+		t.Fatal("More than one call to exit idle mode on the ClientConn; only one expected")
+	case <-sCtx.Done():
+	}
+}
+
+type racyIdlenessState int32
+
+const (
+	stateInital racyIdlenessState = iota
+	stateEnteredIdle
+	stateExitedIdle
+	stateActiveRPCs
+)
+
+// racyIdlenessEnforcer is a test idleness enforcer used specifically to test the
+// race between idle timeout and incoming RPCs.
+type racyIdlenessEnforcer struct {
+	state *racyIdlenessState // Accessed atomically.
+}
+
+// exitIdleMode sets the internal state to stateExitedIdle. We should only ever
+// exit idle when we are currently in idle.
+func (ri *racyIdlenessEnforcer) exitIdleMode() error {
+	if !atomic.CompareAndSwapInt32((*int32)(ri.state), int32(stateEnteredIdle), int32(stateExitedIdle)) {
+		return fmt.Errorf("idleness enforcer asked to exit idle when it did not enter idle earlier")
+	}
+	return nil
+}
+
+// enterIdleMode attempts to set the internal state to stateEnteredIdle. We should only ever enter idle before RPCs start.
+func (ri *racyIdlenessEnforcer) enterIdleMode() error {
+	if !atomic.CompareAndSwapInt32((*int32)(ri.state), int32(stateInital), int32(stateEnteredIdle)) {
+		return fmt.Errorf("idleness enforcer asked to enter idle after rpcs started")
+	}
+	return nil
+}
+
+// TestIdlenessManager_IdleTimeoutRacesWithOnCallBegin tests the case where
+// firing of the idle timeout races with an incoming RPC. The test verifies that
+// if the timer callback wins the race and puts the channel in idle, the RPCs can
+// kick it out of idle. And if the RPCs win the race and keep the channel
+// active, then the timer callback should not attempt to put the channel in idle
+// mode.
+func (s) TestIdlenessManager_IdleTimeoutRacesWithOnCallBegin(t *testing.T) {
+	// Run multiple iterations to simulate different possibilities.
+	for i := 0; i < 10; i++ {
+		t.Run(fmt.Sprintf("iteration=%d", i), func(t *testing.T) {
+			var idlenessState racyIdlenessState
+			enforcer := &racyIdlenessEnforcer{state: &idlenessState}
+
+			// Configure a large idle timeout so that we can control the
+			// race between the timer callback and RPCs.
+			mgr := newIdlenessManager(enforcer, time.Duration(10*time.Minute))
+			defer mgr.close()
+
+			var wg sync.WaitGroup
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				m := mgr.(interface{ handleIdleTimeout() })
+				<-time.After(defaultTestIdleTimeout)
+				m.handleIdleTimeout()
+			}()
+			for j := 0; j < 100; j++ {
+				wg.Add(1)
+				go func() {
+					defer wg.Done()
+					// Wait for the configured idle timeout and simulate an RPC to
+					// race with the idle timeout timer callback.
+ <-time.After(defaultTestIdleTimeout) + if err := mgr.onCallBegin(); err != nil { + t.Errorf("onCallBegin() failed: %v", err) + } + atomic.StoreInt32((*int32)(&idlenessState), int32(stateActiveRPCs)) + mgr.onCallEnd() + }() + } + wg.Wait() + }) + } +} diff --git a/install_gae.sh b/install_gae.sh deleted file mode 100755 index 15ff9facdd78..000000000000 --- a/install_gae.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -TMP=$(mktemp -d /tmp/sdk.XXX) \ -&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ -&& unzip -q $TMP.zip -d $TMP \ -&& export PATH="$PATH:$TMP/go_appengine" \ No newline at end of file diff --git a/interceptor.go b/interceptor.go index 668e0adcf0a9..bb96ef57be89 100644 --- a/interceptor.go +++ b/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/internal/balancer/gracefulswitch/gracefulswitch.go b/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 000000000000..08666f62a7cb --- /dev/null +++ b/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,384 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. 
To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. 
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. 
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
	// The resolver data is only relevant to the most recent LB Policy.
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return errBalancerClosed
	}
	// Perform this call without gsb.mu to prevent deadlocks if the child calls
	// back into the channel. The latest balancer can never be closed during a
	// call from the channel, even without gsb.mu held.
	return balToUpdate.UpdateClientConnState(state)
}

// ResolverError forwards the error to the latest balancer created.
func (gsb *Balancer) ResolverError(err error) {
	// The resolver data is only relevant to the most recent LB Policy.
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		// Closed; resolver errors are dropped silently (no error return on
		// this method's signature).
		return
	}
	// Perform this call without gsb.mu to prevent deadlocks if the child calls
	// back into the channel. The latest balancer can never be closed during a
	// call from the channel, even without gsb.mu held.
	balToUpdate.ResolverError(err)
}

// ExitIdle forwards the call to the latest balancer created.
//
// If the latest balancer does not support ExitIdle, the subConns are
// re-connected to manually.
func (gsb *Balancer) ExitIdle() {
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return
	}
	// There is no need to protect this read with a mutex, as the write to the
	// Balancer field happens in SwitchTo, which completes before this can be
	// called.
	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
		ei.ExitIdle()
		return
	}
	// Fallback for children that don't implement ExitIdler: manually trigger
	// Connect on every SubConn the child created.
	gsb.mu.Lock()
	defer gsb.mu.Unlock()
	for sc := range balToUpdate.subconns {
		sc.Connect()
	}
}

// UpdateSubConnState forwards the update to the appropriate child.
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	// currentMu is held across the forwarded call below, while gsb.mu is
	// released before forwarding so the child may call back into this balancer
	// without deadlocking (same pattern as UpdateClientConnState above).
	gsb.currentMu.Lock()
	defer gsb.currentMu.Unlock()
	gsb.mu.Lock()
	// Forward update to the appropriate child.  Even if there is a pending
	// balancer, the current balancer should continue to get SubConn updates to
	// maintain the proper state while the pending is still connecting.
	var balToUpdate *balancerWrapper
	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
		balToUpdate = gsb.balancerCurrent
	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
		balToUpdate = gsb.balancerPending
	}
	gsb.mu.Unlock()
	if balToUpdate == nil {
		// SubConn belonged to a stale lb policy that has not yet fully closed,
		// or the balancer was already closed.
		return
	}
	balToUpdate.UpdateSubConnState(sc, state)
}

// Close closes any active child balancers.
func (gsb *Balancer) Close() {
	// Clear both children under the lock, then close them outside it (Close
	// on a nil *balancerWrapper is a no-op, so no nil checks are needed).
	gsb.mu.Lock()
	gsb.closed = true
	currentBalancerToClose := gsb.balancerCurrent
	gsb.balancerCurrent = nil
	pendingBalancerToClose := gsb.balancerPending
	gsb.balancerPending = nil
	gsb.mu.Unlock()

	currentBalancerToClose.Close()
	pendingBalancerToClose.Close()
}

// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
// methods to help cleanup SubConns created by the wrapped balancer.
//
// It implements the balancer.ClientConn interface and is passed down in that
// capacity to the wrapped balancer. It maintains a set of subConns created by
// the wrapped balancer and calls from the latter to create/update/remove
// SubConns update this set before being forwarded to the parent ClientConn.
// State updates from the wrapped balancer can result in invocation of the
// graceful switch logic.
type balancerWrapper struct {
	balancer.Balancer
	gsb *Balancer

	lastState balancer.State               // most recent state reported by the wrapped balancer
	subconns  map[balancer.SubConn]bool // subconns created by this balancer
}

func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	// A SHUTDOWN SubConn is permanently gone; stop tracking it.
	if state.ConnectivityState == connectivity.Shutdown {
		bw.gsb.mu.Lock()
		delete(bw.subconns, sc)
		bw.gsb.mu.Unlock()
	}
	// There is no need to protect this read with a mutex, as the write to the
	// Balancer field happens in SwitchTo, which completes before this can be
	// called.
	bw.Balancer.UpdateSubConnState(sc, state)
}

// Close closes the underlying LB policy and removes the subconns it created. bw
// must not be referenced via balancerCurrent or balancerPending in gsb when
// called. gsb.mu must not be held. Does not panic with a nil receiver.
func (bw *balancerWrapper) Close() {
	// A nil receiver is possible when there was no current or pending balancer
	// to close (see Balancer.Close / SwitchTo); Close must be a no-op then.
	if bw == nil {
		return
	}
	// There is no need to protect this read with a mutex, as Close() is
	// impossible to be called concurrently with the write in SwitchTo(). The
	// callsites of Close() for this balancer in Graceful Switch Balancer will
	// never be called until SwitchTo() returns.
	bw.Balancer.Close()
	bw.gsb.mu.Lock()
	for sc := range bw.subconns {
		bw.gsb.cc.RemoveSubConn(sc)
	}
	bw.gsb.mu.Unlock()
}

func (bw *balancerWrapper) UpdateState(state balancer.State) {
	// Hold the mutex for this entire call to ensure it cannot occur
	// concurrently with other updateState() calls. This causes updates to
	// lastState and calls to cc.UpdateState to happen atomically.
	bw.gsb.mu.Lock()
	defer bw.gsb.mu.Unlock()
	bw.lastState = state

	// Drop updates from a balancer that has already been removed.
	if !bw.gsb.balancerCurrentOrPending(bw) {
		return
	}

	if bw == bw.gsb.balancerCurrent {
		// In the case that the current balancer exits READY, and there is a pending
		// balancer, you can forward the pending balancer's cached State up to
		// ClientConn and swap the pending into the current. This is because there
		// is no reason to gracefully switch from and keep using the old policy as
		// the ClientConn is not connected to any backends.
		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
			bw.gsb.swap()
			return
		}
		// Even if there is a pending balancer waiting to be gracefully switched to,
		// continue to forward current balancer updates to the Client Conn. Ignoring
		// state + picker from the current would cause undefined behavior/cause the
		// system to behave incorrectly from the current LB policies perspective.
		// Also, the current LB is still being used by grpc to choose SubConns per
		// RPC, and thus should use the most updated form of the current balancer.
		bw.gsb.cc.UpdateState(state)
		return
	}
	// This method is now dealing with a state update from the pending balancer.
	// If the current balancer is currently in a state other than READY, the new
	// policy can be swapped into place immediately. This is because there is no
	// reason to gracefully switch from and keep using the old policy as the
	// ClientConn is not connected to any backends.
	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
		bw.gsb.swap()
	}
}

func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	// Reject calls from a balancer that has already been removed.
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
	}
	bw.gsb.mu.Unlock()

	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
	if err != nil {
		return nil, err
	}
	// Re-check after the unlocked cc call: the balancer may have been removed
	// while NewSubConn was in flight; if so, undo the creation.
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
		bw.gsb.cc.RemoveSubConn(sc)
		bw.gsb.mu.Unlock()
		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
	}
	bw.subconns[sc] = true
	bw.gsb.mu.Unlock()
	return sc, nil
}

func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
	// Ignore ResolveNow requests from anything other than the most recent
	// balancer, because older balancers were already removed from the config.
	if bw != bw.gsb.latestBalancer() {
		return
	}
	bw.gsb.cc.ResolveNow(opts)
}

func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return
	}
	bw.gsb.mu.Unlock()
	bw.gsb.cc.RemoveSubConn(sc)
}

func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return
	}
	bw.gsb.mu.Unlock()
	bw.gsb.cc.UpdateAddresses(sc, addrs)
}

func (bw *balancerWrapper) Target() string {
	return bw.gsb.cc.Target()
}
diff --git a/internal/balancer/gracefulswitch/gracefulswitch_test.go b/internal/balancer/gracefulswitch/gracefulswitch_test.go
new file mode 100644
index 000000000000..265e1f78e12d
--- /dev/null
+++ b/internal/balancer/gracefulswitch/gracefulswitch_test.go
@@ -0,0 +1,1133 @@
/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package gracefulswitch

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/internal/grpctest"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/serviceconfig"
)

const (
	// defaultTestTimeout bounds waits for events that are expected to happen.
	defaultTestTimeout = 5 * time.Second
	// defaultTestShortTimeout bounds waits for events that must NOT happen.
	defaultTestShortTimeout = 10 * time.Millisecond
)

type s struct {
	grpctest.Tester
}

func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}

// setup returns a test ClientConn and a graceful switch Balancer wrapping it.
func setup(t *testing.T) (*testutils.TestClientConn, *Balancer) {
	tcc := testutils.NewTestClientConn(t)
	return tcc, NewBalancer(tcc, balancer.BuildOptions{})
}

// TestSuccessfulFirstUpdate tests a basic scenario for the graceful switch load
// balancer, where it is setup with a balancer which should populate the current
// load balancer. Any ClientConn updates should then be forwarded to this
// current load balancer.
func (s) TestSuccessfulFirstUpdate(t *testing.T) {
	_, gsb := setup(t)
	if err := gsb.SwitchTo(mockBalancerBuilder1{}); err != nil {
		t.Fatalf("Balancer.SwitchTo failed with error: %v", err)
	}
	if gsb.balancerCurrent == nil {
		t.Fatal("current balancer not populated after a successful call to SwitchTo()")
	}
	// This will be used to update the graceful switch balancer. This update
	// should simply be forwarded down to the current load balancing policy.
	ccs := balancer.ClientConnState{
		BalancerConfig: mockBalancerConfig{},
	}

	// Updating ClientConnState should forward the update exactly as is to the
	// current balancer.
	if err := gsb.UpdateClientConnState(ccs); err != nil {
		t.Fatalf("Balancer.UpdateClientConnState(%v) failed: %v", ccs, err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := gsb.balancerCurrent.Balancer.(*mockBalancer).waitForClientConnUpdate(ctx, ccs); err != nil {
		t.Fatal(err)
	}
}

// TestTwoBalancersSameType tests the scenario where there is a graceful switch
// load balancer setup with a current and pending load balancer of the same
// type. Any ClientConn update should be forwarded to the current lb if there is
// a current lb and no pending lb, and only to the pending lb if the graceful
// switch balancer contains both a current lb and a pending lb. The pending load
// balancer should also swap into current whenever it updates with a
// connectivity state other than CONNECTING.
func (s) TestTwoBalancersSameType(t *testing.T) {
	tcc, gsb := setup(t)
	// This will be used to update the graceful switch balancer. This update
	// should simply be forwarded down to either the current or pending load
	// balancing policy.
	ccs := balancer.ClientConnState{
		BalancerConfig: mockBalancerConfig{},
	}

	gsb.SwitchTo(mockBalancerBuilder1{})
	gsb.UpdateClientConnState(ccs)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := gsb.balancerCurrent.Balancer.(*mockBalancer).waitForClientConnUpdate(ctx, ccs); err != nil {
		t.Fatal(err)
	}

	// The current balancer reporting READY should cause this state
	// to be forwarded to the ClientConn.
	gsb.balancerCurrent.Balancer.(*mockBalancer).updateState(balancer.State{
		ConnectivityState: connectivity.Ready,
		Picker:            &neverErrPicker{},
	})

	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn")
	case state := <-tcc.NewStateCh:
		if state != connectivity.Ready {
			t.Fatalf("current balancer reports connectivity state %v, want %v", state, connectivity.Ready)
		}
	}

	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn")
	case picker := <-tcc.NewPickerCh:
		// Should receive a never err picker.
		if _, err := picker.Pick(balancer.PickInfo{}); err != nil {
			t.Fatalf("ClientConn should have received a never err picker from an UpdateState call")
		}
	}

	// An explicit call to switchTo, even if the same type, should cause the
	// balancer to build a new balancer for pending.
	gsb.SwitchTo(mockBalancerBuilder1{})
	if gsb.balancerPending == nil {
		t.Fatal("pending balancer not populated after another call to SwitchTo()")
	}

	// A ClientConn update received should be forwarded to the new pending LB
	// policy, and not the current one.
	gsb.UpdateClientConnState(ccs)
	sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer sCancel()
	if err := gsb.balancerCurrent.Balancer.(*mockBalancer).waitForClientConnUpdate(sCtx, ccs); err == nil {
		t.Fatal("current balancer received a ClientConn update when there is a pending balancer")
	}
	if err := gsb.balancerPending.Balancer.(*mockBalancer).waitForClientConnUpdate(ctx, ccs); err != nil {
		t.Fatal(err)
	}

	// If the pending load balancer reports that it is CONNECTING, no update
	// should be sent to the ClientConn.
	gsb.balancerPending.Balancer.(*mockBalancer).updateState(balancer.State{
		ConnectivityState: connectivity.Connecting,
	})
	sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer sCancel()
	select {
	case <-tcc.NewStateCh:
		t.Fatal("balancerPending reporting CONNECTING should not forward up to the ClientConn")
	case <-sCtx.Done():
	}

	currBal := gsb.balancerCurrent.Balancer.(*mockBalancer)
	// If the pending load balancer reports a state other than CONNECTING, the
	// pending load balancer is logically warmed up, and the ClientConn should
	// be updated with the State and Picker to start using the new policy. The
	// pending load balancing policy should also be switched into the current
	// load balancer.
	gsb.balancerPending.Balancer.(*mockBalancer).updateState(balancer.State{
		ConnectivityState: connectivity.Ready,
		Picker:            &neverErrPicker{},
	})

	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn")
	case state := <-tcc.NewStateCh:
		if state != connectivity.Ready {
			t.Fatalf("pending balancer reports connectivity state %v, want %v", state, connectivity.Ready)
		}
	}

	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn")
	case picker := <-tcc.NewPickerCh:
		// This picker should be the recent one sent from UpdateState(), a never
		// err picker, not the nil picker from two updateState() calls previous.
		if picker == nil {
			t.Fatalf("ClientConn should have received a never err picker, which is the most recent picker, from an UpdateState call")
		}
		if _, err := picker.Pick(balancer.PickInfo{}); err != nil {
			t.Fatalf("ClientConn should have received a never err picker, which is the most recent picker, from an UpdateState call")
		}
	}
	// The current balancer should be closed as a result of the swap.
	if err := currBal.waitForClose(ctx); err != nil {
		t.Fatal(err)
	}
}

// TestCurrentNotReadyPendingUpdate tests the scenario where there is a current
// and pending load balancer setup in the graceful switch load balancer, and the
// current LB is not in the connectivity state READY. Any update from the
// pending load balancer should cause the graceful switch load balancer to swap
// the pending into current, and update the ClientConn with the pending load
// balancers state.
func (s) TestCurrentNotReadyPendingUpdate(t *testing.T) {
	tcc, gsb := setup(t)
	gsb.SwitchTo(mockBalancerBuilder1{})
	gsb.SwitchTo(mockBalancerBuilder1{})
	if gsb.balancerPending == nil {
		t.Fatal("pending balancer not populated after another call to SwitchTo()")
	}
	currBal := gsb.balancerCurrent.Balancer.(*mockBalancer)
	// Due to the current load balancer not being in state READY, any update
	// from the pending load balancer should cause that update to be forwarded
	// to the ClientConn and also cause the pending load balancer to swap into
	// the current one.
	gsb.balancerPending.Balancer.(*mockBalancer).updateState(balancer.State{
		ConnectivityState: connectivity.Connecting,
		Picker:            &neverErrPicker{},
	})
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	select {
	case <-ctx.Done():
		t.Fatalf("timeout waiting for an UpdateState call on the ClientConn")
	case state := <-tcc.NewStateCh:
		if state != connectivity.Connecting {
			t.Fatalf("ClientConn received connectivity state %v, want %v (from pending)", state, connectivity.Connecting)
		}
	}
	select {
	case <-ctx.Done():
		t.Fatalf("timeout waiting for an UpdateState call on the ClientConn")
	case picker := <-tcc.NewPickerCh:
		// Should receive a never err picker.
		if _, err := picker.Pick(balancer.PickInfo{}); err != nil {
			t.Fatalf("ClientConn should have received a never err picker from an UpdateState call")
		}
	}

	// The current balancer should be closed as a result of the swap.
	if err := currBal.waitForClose(ctx); err != nil {
		t.Fatal(err)
	}
}

// TestCurrentLeavingReady tests the scenario where there is a current and
// pending load balancer setup in the graceful switch load balancer, with the
// current load balancer being in the state READY, and the current load balancer
// then transitions into a state other than READY. This should cause the pending
// load balancer to swap into the current load balancer, and the ClientConn to
// be updated with the cached pending load balancing state. Also, once the
// current is cleared from the graceful switch load balancer, any updates sent
// should be intercepted and not forwarded to the ClientConn, as the balancer
// has already been cleared.
func (s) TestCurrentLeavingReady(t *testing.T) {
	tcc, gsb := setup(t)
	gsb.SwitchTo(mockBalancerBuilder1{})
	currBal := gsb.balancerCurrent.Balancer.(*mockBalancer)
	currBal.updateState(balancer.State{
		ConnectivityState: connectivity.Ready,
	})

	gsb.SwitchTo(mockBalancerBuilder2{})
	// Sends CONNECTING, shouldn't make its way to ClientConn.
	gsb.balancerPending.Balancer.(*mockBalancer).updateState(balancer.State{
		ConnectivityState: connectivity.Connecting,
		Picker:            &neverErrPicker{},
	})

	// The current balancer leaving READY should cause the pending balancer to
	// swap to the current balancer. This swap from current to pending should
	// also update the ClientConn with the pending balancers cached state and
	// picker.
	currBal.updateState(balancer.State{
		ConnectivityState: connectivity.Idle,
	})

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn")
	case state := <-tcc.NewStateCh:
		if state != connectivity.Connecting {
			t.Fatalf("current balancer reports connectivity state %v, want %v", state, connectivity.Connecting)
		}
	}

	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn")
	case picker := <-tcc.NewPickerCh:
		// Should receive the never err picker cached from the pending LB's
		// updateState() call above.
		if _, err := picker.Pick(balancer.PickInfo{}); err != nil {
			t.Fatalf("ClientConn should have received a never err picker, the cached picker, from an UpdateState call")
		}
	}

	// The current balancer should be closed as a result of the swap.
	if err := currBal.waitForClose(ctx); err != nil {
		t.Fatal(err)
	}

	// The current balancer is now cleared from the graceful switch load
	// balancer. Thus, any update from the old current should be intercepted by
	// the graceful switch load balancer and not forward up to the ClientConn.
	currBal.updateState(balancer.State{
		ConnectivityState: connectivity.Ready,
		Picker:            &neverErrPicker{},
	})

	// This update should not be forwarded to the ClientConn.
	sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer sCancel()
	select {
	case <-sCtx.Done():
	case <-tcc.NewStateCh:
		t.Fatal("UpdateState() from a cleared balancer should not make it's way to ClientConn")
	}

	if _, err := currBal.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}); err == nil {
		t.Fatal("newSubConn() from a cleared balancer should have returned an error")
	}

	// This newSubConn call should also not reach the ClientConn.
	sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer sCancel()
	select {
	case <-sCtx.Done():
	case <-tcc.NewSubConnCh:
		t.Fatal("newSubConn() from a cleared balancer should not make it's way to ClientConn")
	}
}

// TestBalancerSubconns tests the SubConn functionality of the graceful switch
// load balancer. This tests the SubConn update flow in both directions, and
// makes sure updates end up at the correct component. Also, it tests that on an
// UpdateSubConnState() call from the ClientConn, the graceful switch load
// balancer forwards it to the correct child balancer.
func (s) TestBalancerSubconns(t *testing.T) {
	tcc, gsb := setup(t)
	gsb.SwitchTo(mockBalancerBuilder1{})
	gsb.SwitchTo(mockBalancerBuilder2{})

	// A child balancer creating a new SubConn should eventually be forwarded to
	// the ClientConn held by the graceful switch load balancer.
	sc1, err := gsb.balancerCurrent.Balancer.(*mockBalancer).newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{})
	if err != nil {
		t.Fatalf("error constructing newSubConn in gsb: %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for an NewSubConn call on the ClientConn")
	case sc := <-tcc.NewSubConnCh:
		if !cmp.Equal(sc1, sc, cmp.AllowUnexported(testutils.TestSubConn{})) {
			t.Fatalf("NewSubConn, want %v, got %v", sc1, sc)
		}
	}

	// The other child balancer creating a new SubConn should also eventually
	// be forwarded to the ClientConn held by the graceful switch load balancer.
	sc2, err := gsb.balancerPending.Balancer.(*mockBalancer).newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{})
	if err != nil {
		t.Fatalf("error constructing newSubConn in gsb: %v", err)
	}
	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for an NewSubConn call on the ClientConn")
	case sc := <-tcc.NewSubConnCh:
		if !cmp.Equal(sc2, sc, cmp.AllowUnexported(testutils.TestSubConn{})) {
			t.Fatalf("NewSubConn, want %v, got %v", sc2, sc)
		}
	}
	scState := balancer.SubConnState{ConnectivityState: connectivity.Ready}
	// Updating the SubConnState for sc1 should cause the graceful switch
	// balancer to forward the Update to balancerCurrent for sc1, as that is the
	// balancer that created this SubConn.
	gsb.UpdateSubConnState(sc1, scState)

	// This update should get forwarded to balancerCurrent, as that is the LB
	// that created this SubConn.
	if err := gsb.balancerCurrent.Balancer.(*mockBalancer).waitForSubConnUpdate(ctx, subConnWithState{sc: sc1, state: scState}); err != nil {
		t.Fatal(err)
	}
	// This update should not get forwarded to balancerPending, as that is not
	// the LB that created this SubConn.
	sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer sCancel()
	if err := gsb.balancerPending.Balancer.(*mockBalancer).waitForSubConnUpdate(sCtx, subConnWithState{sc: sc1, state: scState}); err == nil {
		t.Fatalf("balancerPending should not have received a subconn update for sc1")
	}

	// Updating the SubConnState for sc2 should cause the graceful switch
	// balancer to forward the Update to balancerPending for sc2, as that is the
	// balancer that created this SubConn.
	gsb.UpdateSubConnState(sc2, scState)

	// This update should get forwarded to balancerPending, as that is the LB
	// that created this SubConn.
	if err := gsb.balancerPending.Balancer.(*mockBalancer).waitForSubConnUpdate(ctx, subConnWithState{sc: sc2, state: scState}); err != nil {
		t.Fatal(err)
	}

	// This update should not get forwarded to balancerCurrent, as that is not
	// the LB that created this SubConn.
	sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer sCancel()
	if err := gsb.balancerCurrent.Balancer.(*mockBalancer).waitForSubConnUpdate(sCtx, subConnWithState{sc: sc2, state: scState}); err == nil {
		t.Fatalf("balancerCurrent should not have received a subconn update for sc2")
	}

	// Updating the addresses for both SubConns and removing both SubConns
	// should get forwarded to the ClientConn.

	// Updating the addresses for sc1 should get forwarded to the ClientConn.
	gsb.balancerCurrent.Balancer.(*mockBalancer).updateAddresses(sc1, []resolver.Address{})

	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn")
	case <-tcc.UpdateAddressesAddrsCh:
	}

	// Updating the addresses for sc2 should also get forwarded to the ClientConn.
	gsb.balancerPending.Balancer.(*mockBalancer).updateAddresses(sc2, []resolver.Address{})
	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn")
	case <-tcc.UpdateAddressesAddrsCh:
	}

	// balancerCurrent removing sc1 should get forwarded to the ClientConn.
	gsb.balancerCurrent.Balancer.(*mockBalancer).removeSubConn(sc1)
	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn")
	case sc := <-tcc.RemoveSubConnCh:
		if !cmp.Equal(sc1, sc, cmp.AllowUnexported(testutils.TestSubConn{})) {
			t.Fatalf("RemoveSubConn, want %v, got %v", sc1, sc)
		}
	}
	// balancerPending removing sc2 should get forwarded to the ClientConn.
	gsb.balancerPending.Balancer.(*mockBalancer).removeSubConn(sc2)
	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn")
	case sc := <-tcc.RemoveSubConnCh:
		if !cmp.Equal(sc2, sc, cmp.AllowUnexported(testutils.TestSubConn{})) {
			t.Fatalf("RemoveSubConn, want %v, got %v", sc2, sc)
		}
	}
}

// TestBalancerClose tests the graceful switch balancer's Close() functionality.
// From the Close() call, the graceful switch balancer should remove any created
// Subconns and Close() the current and pending load balancers. This Close()
// call should also cause any other events (calls to entrance functions) to be
// no-ops.
func (s) TestBalancerClose(t *testing.T) {
	// Setup gsb balancer with current, pending, and one created SubConn on both
	// current and pending.
	tcc, gsb := setup(t)
	gsb.SwitchTo(mockBalancerBuilder1{})
	gsb.SwitchTo(mockBalancerBuilder2{})

	sc1, err := gsb.balancerCurrent.Balancer.(*mockBalancer).newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) // Will eventually get back a SubConn with an identifying property id 1
	if err != nil {
		t.Fatalf("error constructing newSubConn in gsb: %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for an NewSubConn call on the ClientConn")
	case <-tcc.NewSubConnCh:
	}

	sc2, err := gsb.balancerPending.Balancer.(*mockBalancer).newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) // Will eventually get back a SubConn with an identifying property id 2
	if err != nil {
		t.Fatalf("error constructing newSubConn in gsb: %v", err)
	}
	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for an NewSubConn call on the ClientConn")
	case <-tcc.NewSubConnCh:
	}

	currBal := gsb.balancerCurrent.Balancer.(*mockBalancer)
	pendBal := gsb.balancerPending.Balancer.(*mockBalancer)

	// Closing the graceful switch load balancer should lead to removing any
	// created SubConns, and closing both the current and pending load balancer.
	gsb.Close()

	// The order of SubConns the graceful switch load balancer tells the Client
	// Conn to remove is non deterministic, as it is stored in a map. However,
	// the first SubConn removed should be either sc1 or sc2.
	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn")
	case sc := <-tcc.RemoveSubConnCh:
		if !cmp.Equal(sc1, sc, cmp.AllowUnexported(testutils.TestSubConn{})) {
			if !cmp.Equal(sc2, sc, cmp.AllowUnexported(testutils.TestSubConn{})) {
				t.Fatalf("RemoveSubConn, want either %v or %v, got %v", sc1, sc2, sc)
			}
		}
	}

	// The graceful switch load balancer should then tell the ClientConn to
	// remove the other SubConn.
	select {
	case <-ctx.Done():
		t.Fatalf("timeout while waiting for an UpdateAddresses call on the ClientConn")
	case sc := <-tcc.RemoveSubConnCh:
		if !cmp.Equal(sc1, sc, cmp.AllowUnexported(testutils.TestSubConn{})) {
			if !cmp.Equal(sc2, sc, cmp.AllowUnexported(testutils.TestSubConn{})) {
				t.Fatalf("RemoveSubConn, want either %v or %v, got %v", sc1, sc2, sc)
			}
		}
	}

	// The current balancer should get closed as a result of the graceful switch balancer being closed.
	if err := currBal.waitForClose(ctx); err != nil {
		t.Fatal(err)
	}
	// The pending balancer should also get closed as a result of the graceful switch balancer being closed.
	if err := pendBal.waitForClose(ctx); err != nil {
		t.Fatal(err)
	}

	// Once the graceful switch load balancer has been closed, any entrance
	// function should be a no-op and return errBalancerClosed if the function
	// returns an error.

	// SwitchTo() should return an error due to the graceful switch load
	// balancer having been closed already.
	if err := gsb.SwitchTo(mockBalancerBuilder1{}); err != errBalancerClosed {
		t.Fatalf("gsb.SwitchTo(%v) returned error %v, want %v", mockBalancerBuilder1{}, err, errBalancerClosed)
	}

	// UpdateClientConnState() should return an error due to the graceful switch
	// load balancer having been closed already.
	ccs := balancer.ClientConnState{
		BalancerConfig: mockBalancerConfig{},
	}
	if err := gsb.UpdateClientConnState(ccs); err != errBalancerClosed {
		t.Fatalf("gsb.UpdateCLientConnState(%v) returned error %v, want %v", ccs, err, errBalancerClosed)
	}

	// After the graceful switch load balancer has been closed, any resolver error
	// shouldn't forward to either balancer, as the resolver error is a no-op
	// and also even if not, the balancers should have been cleared from the
	// graceful switch load balancer.
	gsb.ResolverError(balancer.ErrBadResolverState)
	sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer sCancel()
	if err := currBal.waitForResolverError(sCtx, balancer.ErrBadResolverState); !strings.Contains(err.Error(), sCtx.Err().Error()) {
		t.Fatal("the current balancer should not have received the resolver error after close")
	}
	sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer sCancel()
	if err := pendBal.waitForResolverError(sCtx, balancer.ErrBadResolverState); !strings.Contains(err.Error(), sCtx.Err().Error()) {
		t.Fatal("the pending balancer should not have received the resolver error after close")
	}
}

// TestResolverError tests the functionality of a Resolver Error. If there is a
// current balancer, but no pending, the error should be forwarded to the
// current balancer. If there is both a current and pending balancer, the error
// should be forwarded to only the pending balancer.
func (s) TestResolverError(t *testing.T) {
	_, gsb := setup(t)
	gsb.SwitchTo(mockBalancerBuilder1{})
	currBal := gsb.balancerCurrent.Balancer.(*mockBalancer)
	// If there is only a current balancer present, the resolver error should be
	// forwarded to the current balancer.
	gsb.ResolverError(balancer.ErrBadResolverState)
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := currBal.waitForResolverError(ctx, balancer.ErrBadResolverState); err != nil {
		t.Fatal(err)
	}

	gsb.SwitchTo(mockBalancerBuilder1{})

	// If there is a pending balancer present, then a resolver error should be
	// forwarded to only the pending balancer, not the current.
	pendBal := gsb.balancerPending.Balancer.(*mockBalancer)
	gsb.ResolverError(balancer.ErrBadResolverState)

	// The Resolver Error should not be forwarded to the current load balancer.
	// A short timeout is used: the absence of the event within it is treated
	// as the event not happening.
	sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
	defer sCancel()
	if err := currBal.waitForResolverError(sCtx, balancer.ErrBadResolverState); !strings.Contains(err.Error(), sCtx.Err().Error()) {
		t.Fatal("the current balancer should not have received the resolver error after close")
	}

	// The Resolver Error should be forwarded to the pending load balancer.
	if err := pendBal.waitForResolverError(ctx, balancer.ErrBadResolverState); err != nil {
		t.Fatal(err)
	}
}

// TestPendingReplacedByAnotherPending tests the scenario where a graceful
// switch balancer has a current and pending load balancer, and receives a
// SwitchTo() call, which then replaces the pending. This should cause the
// graceful switch balancer to clear pending state, close old pending SubConns,
// and Close() the pending balancer being replaced.
+func (s) TestPendingReplacedByAnotherPending(t *testing.T) {
+	tcc, gsb := setup(t)
+	gsb.SwitchTo(mockBalancerBuilder1{})
+	currBal := gsb.balancerCurrent.Balancer.(*mockBalancer)
+	currBal.updateState(balancer.State{
+		ConnectivityState: connectivity.Ready,
+	})
+
+	// Populate pending with a SwitchTo() call.
+	gsb.SwitchTo(mockBalancerBuilder2{})
+
+	pendBal := gsb.balancerPending.Balancer.(*mockBalancer)
+	sc1, err := pendBal.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{})
+	if err != nil {
+		t.Fatalf("error constructing newSubConn in gsb: %v", err)
+	}
+	// This picker never returns an error, which can help this test verify
+	// whether this cached state will get cleared on a new pending balancer
+	// (will replace it with a picker that always errors).
+	pendBal.updateState(balancer.State{
+		ConnectivityState: connectivity.Connecting,
+		Picker:            &neverErrPicker{},
+	})
+
+	// Replace pending with a SwitchTo() call.
+	gsb.SwitchTo(mockBalancerBuilder2{})
+	// The pending balancer being replaced should cause the graceful switch
+	// balancer to Remove() any created SubConns for the old pending balancer
+	// and also Close() the old pending balancer.
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	select {
+	case <-ctx.Done():
+		t.Fatalf("timeout while waiting for a RemoveSubConn call on the ClientConn")
+	case sc := <-tcc.RemoveSubConnCh:
+		if !cmp.Equal(sc1, sc, cmp.AllowUnexported(testutils.TestSubConn{})) {
+			t.Fatalf("RemoveSubConn, want %v, got %v", sc1, sc)
+		}
+	}
+
+	if err := pendBal.waitForClose(ctx); err != nil {
+		t.Fatal(err)
+	}
+
+	// Switching the current out of READY should cause the pending LB to swap
+	// into current, causing the graceful switch balancer to update the
+	// ClientConn with the cached pending state. Since the new pending hasn't
+	// sent an Update, the default state with connectivity state CONNECTING and
+	// an errPicker should be sent to the ClientConn.
+	currBal.updateState(balancer.State{
+		ConnectivityState: connectivity.Idle,
+	})
+
+	// The update should contain a default connectivity state CONNECTING for the
+	// state of the new pending LB policy.
+	select {
+	case <-ctx.Done():
+		t.Fatalf("timeout while waiting for an UpdateState() call on the ClientConn")
+	case state := <-tcc.NewStateCh:
+		if state != connectivity.Connecting {
+			t.Fatalf("UpdateState(), want connectivity state %v, got %v", connectivity.Connecting, state)
+		}
+	}
+	// The update should contain a default picker ErrPicker in the picker sent
+	// for the state of the new pending LB policy.
+	select {
+	case <-ctx.Done():
+		t.Fatalf("timeout while waiting for an UpdateState() call on the ClientConn")
+	case picker := <-tcc.NewPickerCh:
+		if _, err := picker.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable {
+			t.Fatalf("ClientConn should have received an err picker from an UpdateState call")
+		}
+	}
+}
+
+// Picker which never errors here for test purposes (can fill up tests further up with this)
+type neverErrPicker struct{}
+
+func (p *neverErrPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	return balancer.PickResult{}, nil
+}
+
+// TestUpdateSubConnStateRace tests the race condition when the graceful switch
+// load balancer receives a SubConnUpdate concurrently with an UpdateState()
+// call, which can cause the balancer to forward the update to a balancer that
+// is to be closed and cleared. The balancer API guarantees to never call any
+// method on the balancer after a Close() call, and the test verifies that
+// doesn't happen within the graceful switch load balancer.
+func (s) TestUpdateSubConnStateRace(t *testing.T) { + tcc, gsb := setup(t) + gsb.SwitchTo(verifyBalancerBuilder{}) + gsb.SwitchTo(mockBalancerBuilder1{}) + currBal := gsb.balancerCurrent.Balancer.(*verifyBalancer) + currBal.t = t + pendBal := gsb.balancerPending.Balancer.(*mockBalancer) + sc, err := currBal.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error constructing newSubConn in gsb: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an NewSubConn call on the ClientConn") + case <-tcc.NewSubConnCh: + } + // Spawn a goroutine that constantly calls UpdateSubConn for the current + // balancer, which will get deleted in this testing goroutine. + finished := make(chan struct{}) + go func() { + for { + select { + case <-finished: + return + default: + } + gsb.UpdateSubConnState(sc, balancer.SubConnState{ + ConnectivityState: connectivity.Ready, + }) + } + }() + time.Sleep(time.Millisecond) + // This UpdateState call causes current to be closed/cleared. + pendBal.updateState(balancer.State{ + ConnectivityState: connectivity.Ready, + }) + // From this, either one of two things happen. Either the graceful switch + // load balancer doesn't Close() the current balancer before it forwards the + // SubConn update to the child, and the call gets forwarded down to the + // current balancer, or it can Close() the current balancer in between + // reading the balancer pointer and writing to it, and in that case the old + // current balancer should not be updated, as the balancer has already been + // closed and the balancer API guarantees it. + close(finished) +} + +// TestInlineCallbackInBuild tests the scenario where a balancer calls back into +// the balancer.ClientConn API inline from it's build function. 
+func (s) TestInlineCallbackInBuild(t *testing.T) { + tcc, gsb := setup(t) + // This build call should cause all of the inline updates to forward to the + // ClientConn. + gsb.SwitchTo(buildCallbackBalancerBuilder{}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateState() call on the ClientConn") + case <-tcc.NewStateCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an NewSubConn() call on the ClientConn") + case <-tcc.NewSubConnCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateAddresses() call on the ClientConn") + case <-tcc.UpdateAddressesAddrsCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an RemoveSubConn() call on the ClientConn") + case <-tcc.RemoveSubConnCh: + } + oldCurrent := gsb.balancerCurrent.Balancer.(*buildCallbackBal) + + // Since the callback reports a state READY, this new inline balancer should + // be swapped to the current. + gsb.SwitchTo(buildCallbackBalancerBuilder{}) + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateState() call on the ClientConn") + case <-tcc.NewStateCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an NewSubConn() call on the ClientConn") + case <-tcc.NewSubConnCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an UpdateAddresses() call on the ClientConn") + case <-tcc.UpdateAddressesAddrsCh: + } + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for an RemoveSubConn() call on the ClientConn") + case <-tcc.RemoveSubConnCh: + } + + // The current balancer should be closed as a result of the swap. + if err := oldCurrent.waitForClose(ctx); err != nil { + t.Fatalf("error waiting for balancer close: %v", err) + } + + // The old balancer should be deprecated and any calls from it should be a no-op. 
+ oldCurrent.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + select { + case <-tcc.NewSubConnCh: + t.Fatal("Deprecated LB calling NewSubConn() should not forward up to the ClientConn") + case <-sCtx.Done(): + } +} + +// TestExitIdle tests the ExitIdle operation on the Graceful Switch Balancer for +// both possible codepaths, one where the child implements ExitIdler interface +// and one where the child doesn't implement ExitIdler interface. +func (s) TestExitIdle(t *testing.T) { + _, gsb := setup(t) + // switch to a balancer that implements ExitIdle{} (will populate current). + gsb.SwitchTo(mockBalancerBuilder1{}) + currBal := gsb.balancerCurrent.Balancer.(*mockBalancer) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // exitIdle on the Graceful Switch Balancer should get forwarded to the + // current child as it implements exitIdle. + gsb.ExitIdle() + if err := currBal.waitForExitIdle(ctx); err != nil { + t.Fatal(err) + } + + // switch to a balancer that doesn't implement ExitIdle{} (will populate + // pending). + gsb.SwitchTo(verifyBalancerBuilder{}) + // call exitIdle concurrently with newSubConn to make sure there is not a + // data race. 
+ done := make(chan struct{}) + go func() { + gsb.ExitIdle() + close(done) + }() + pendBal := gsb.balancerPending.Balancer.(*verifyBalancer) + for i := 0; i < 10; i++ { + pendBal.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + } + <-done +} + +const balancerName1 = "mock_balancer_1" +const balancerName2 = "mock_balancer_2" +const verifyBalName = "verifyNoSubConnUpdateAfterCloseBalancer" +const buildCallbackBalName = "callbackInBuildBalancer" + +type mockBalancerBuilder1 struct{} + +func (mockBalancerBuilder1) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &mockBalancer{ + ccsCh: testutils.NewChannel(), + scStateCh: testutils.NewChannel(), + resolverErrCh: testutils.NewChannel(), + closeCh: testutils.NewChannel(), + exitIdleCh: testutils.NewChannel(), + cc: cc, + } +} + +func (mockBalancerBuilder1) Name() string { + return balancerName1 +} + +type mockBalancerConfig struct { + serviceconfig.LoadBalancingConfig +} + +// mockBalancer is a fake balancer used to verify different actions from +// the gracefulswitch. It contains a bunch of channels to signal different events +// to the test. +type mockBalancer struct { + // ccsCh is a channel used to signal the receipt of a ClientConn update. + ccsCh *testutils.Channel + // scStateCh is a channel used to signal the receipt of a SubConn update. + scStateCh *testutils.Channel + // resolverErrCh is a channel used to signal a resolver error. + resolverErrCh *testutils.Channel + // closeCh is a channel used to signal the closing of this balancer. + closeCh *testutils.Channel + // exitIdleCh is a channel used to signal the receipt of an ExitIdle call. 
+ exitIdleCh *testutils.Channel + // Hold onto ClientConn wrapper to communicate with it + cc balancer.ClientConn +} + +type subConnWithState struct { + sc balancer.SubConn + state balancer.SubConnState +} + +func (mb1 *mockBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + // Need to verify this call...use a channel?...all of these will need verification + mb1.ccsCh.Send(ccs) + return nil +} + +func (mb1 *mockBalancer) ResolverError(err error) { + mb1.resolverErrCh.Send(err) +} + +func (mb1 *mockBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + mb1.scStateCh.Send(subConnWithState{sc: sc, state: state}) +} + +func (mb1 *mockBalancer) Close() { + mb1.closeCh.Send(struct{}{}) +} + +func (mb1 *mockBalancer) ExitIdle() { + mb1.exitIdleCh.Send(struct{}{}) +} + +// waitForClientConnUpdate verifies if the mockBalancer receives the +// provided ClientConnState within a reasonable amount of time. +func (mb1 *mockBalancer) waitForClientConnUpdate(ctx context.Context, wantCCS balancer.ClientConnState) error { + ccs, err := mb1.ccsCh.Receive(ctx) + if err != nil { + return fmt.Errorf("error waiting for ClientConnUpdate: %v", err) + } + gotCCS := ccs.(balancer.ClientConnState) + if diff := cmp.Diff(gotCCS, wantCCS, cmpopts.IgnoreFields(resolver.State{}, "Attributes")); diff != "" { + return fmt.Errorf("error in ClientConnUpdate: received unexpected ClientConnState, diff (-got +want): %v", diff) + } + return nil +} + +// waitForSubConnUpdate verifies if the mockBalancer receives the provided +// SubConn update before the context expires. 
+func (mb1 *mockBalancer) waitForSubConnUpdate(ctx context.Context, wantSCS subConnWithState) error { + scs, err := mb1.scStateCh.Receive(ctx) + if err != nil { + return fmt.Errorf("error waiting for SubConnUpdate: %v", err) + } + gotSCS := scs.(subConnWithState) + if !cmp.Equal(gotSCS, wantSCS, cmp.AllowUnexported(subConnWithState{}, testutils.TestSubConn{})) { + return fmt.Errorf("error in SubConnUpdate: received SubConnState: %+v, want %+v", gotSCS, wantSCS) + } + return nil +} + +// waitForResolverError verifies if the mockBalancer receives the provided +// resolver error before the context expires. +func (mb1 *mockBalancer) waitForResolverError(ctx context.Context, wantErr error) error { + gotErr, err := mb1.resolverErrCh.Receive(ctx) + if err != nil { + return fmt.Errorf("error waiting for resolver error: %v", err) + } + if gotErr != wantErr { + return fmt.Errorf("received resolver error: %v, want %v", gotErr, wantErr) + } + return nil +} + +// waitForClose verifies that the mockBalancer is closed before the context +// expires. +func (mb1 *mockBalancer) waitForClose(ctx context.Context) error { + if _, err := mb1.closeCh.Receive(ctx); err != nil { + return fmt.Errorf("error waiting for Close(): %v", err) + } + return nil +} + +// waitForExitIdle verifies that ExitIdle gets called on the mockBalancer before +// the context expires. 
+func (mb1 *mockBalancer) waitForExitIdle(ctx context.Context) error { + if _, err := mb1.exitIdleCh.Receive(ctx); err != nil { + return fmt.Errorf("error waiting for ExitIdle(): %v", err) + } + return nil +} + +func (mb1 *mockBalancer) updateState(state balancer.State) { + mb1.cc.UpdateState(state) +} + +func (mb1 *mockBalancer) newSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + return mb1.cc.NewSubConn(addrs, opts) +} + +func (mb1 *mockBalancer) updateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + mb1.cc.UpdateAddresses(sc, addrs) +} + +func (mb1 *mockBalancer) removeSubConn(sc balancer.SubConn) { + mb1.cc.RemoveSubConn(sc) +} + +type mockBalancerBuilder2 struct{} + +func (mockBalancerBuilder2) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &mockBalancer{ + ccsCh: testutils.NewChannel(), + scStateCh: testutils.NewChannel(), + resolverErrCh: testutils.NewChannel(), + closeCh: testutils.NewChannel(), + cc: cc, + } +} + +func (mockBalancerBuilder2) Name() string { + return balancerName2 +} + +type verifyBalancerBuilder struct{} + +func (verifyBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &verifyBalancer{ + closed: grpcsync.NewEvent(), + cc: cc, + } +} + +func (verifyBalancerBuilder) Name() string { + return verifyBalName +} + +// verifyBalancer is a balancer that verifies that after a Close() call, an +// updateSubConnState() call never happens. +type verifyBalancer struct { + closed *grpcsync.Event + // Hold onto the ClientConn wrapper to communicate with it. + cc balancer.ClientConn + // To fail the test if UpdateSubConnState gets called after Close(). 
+ t *testing.T +} + +func (vb *verifyBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + return nil +} + +func (vb *verifyBalancer) ResolverError(err error) {} + +func (vb *verifyBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if vb.closed.HasFired() { + vb.t.Fatal("UpdateSubConnState was called after Close(), which breaks the balancer API") + } +} + +func (vb *verifyBalancer) Close() { + vb.closed.Fire() +} + +func (vb *verifyBalancer) newSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + return vb.cc.NewSubConn(addrs, opts) +} + +type buildCallbackBalancerBuilder struct{} + +func (buildCallbackBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + b := &buildCallbackBal{ + cc: cc, + closeCh: testutils.NewChannel(), + } + b.updateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + }) + sc, err := b.newSubConn([]resolver.Address{}, balancer.NewSubConnOptions{}) + if err != nil { + return nil + } + b.updateAddresses(sc, []resolver.Address{}) + b.removeSubConn(sc) + return b +} + +func (buildCallbackBalancerBuilder) Name() string { + return buildCallbackBalName +} + +type buildCallbackBal struct { + // Hold onto the ClientConn wrapper to communicate with it. + cc balancer.ClientConn + // closeCh is a channel used to signal the closing of this balancer. 
+ closeCh *testutils.Channel +} + +func (bcb *buildCallbackBal) UpdateClientConnState(ccs balancer.ClientConnState) error { + return nil +} + +func (bcb *buildCallbackBal) ResolverError(err error) {} + +func (bcb *buildCallbackBal) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {} + +func (bcb *buildCallbackBal) Close() { + bcb.closeCh.Send(struct{}{}) +} + +func (bcb *buildCallbackBal) updateState(state balancer.State) { + bcb.cc.UpdateState(state) +} + +func (bcb *buildCallbackBal) newSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + return bcb.cc.NewSubConn(addrs, opts) +} + +func (bcb *buildCallbackBal) updateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bcb.cc.UpdateAddresses(sc, addrs) +} + +func (bcb *buildCallbackBal) removeSubConn(sc balancer.SubConn) { + bcb.cc.RemoveSubConn(sc) +} + +// waitForClose verifies that the mockBalancer is closed before the context +// expires. +func (bcb *buildCallbackBal) waitForClose(ctx context.Context) error { + if _, err := bcb.closeCh.Receive(ctx); err != nil { + return err + } + return nil +} diff --git a/internal/balancer/nop/nop.go b/internal/balancer/nop/nop.go new file mode 100644 index 000000000000..0c96f1b81186 --- /dev/null +++ b/internal/balancer/nop/nop.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package nop implements a balancer with all of its balancer operations as +// no-ops, other than returning a Transient Failure Picker on a Client Conn +// update. +package nop + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" +) + +// bal is a balancer with all of its balancer operations as no-ops, other than +// returning a Transient Failure Picker on a Client Conn update. +type bal struct { + cc balancer.ClientConn + err error +} + +// NewBalancer returns a no-op balancer. +func NewBalancer(cc balancer.ClientConn, err error) balancer.Balancer { + return &bal{ + cc: cc, + err: err, + } +} + +// UpdateClientConnState updates the bal's Client Conn with an Error Picker +// and a Connectivity State of TRANSIENT_FAILURE. +func (b *bal) UpdateClientConnState(_ balancer.ClientConnState) error { + b.cc.UpdateState(balancer.State{ + Picker: base.NewErrPicker(b.err), + ConnectivityState: connectivity.TransientFailure, + }) + return nil +} + +// ResolverError is a no-op. +func (b *bal) ResolverError(_ error) {} + +// UpdateSubConnState is a no-op. +func (b *bal) UpdateSubConnState(_ balancer.SubConn, _ balancer.SubConnState) {} + +// Close is a no-op. +func (b *bal) Close() {} diff --git a/internal/balancer/stub/stub.go b/internal/balancer/stub/stub.go index e3757c1a50bc..9fe6d93c9db5 100644 --- a/internal/balancer/stub/stub.go +++ b/internal/balancer/stub/stub.go @@ -19,7 +19,12 @@ // Package stub implements a balancer for testing purposes. package stub -import "google.golang.org/grpc/balancer" +import ( + "encoding/json" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/serviceconfig" +) // BalancerFuncs contains all balancer.Balancer functions with a preceding // *BalancerData parameter for passing additional instance information. Any @@ -28,11 +33,14 @@ type BalancerFuncs struct { // Init is called after ClientConn and BuildOptions are set in // BalancerData. 
It may be used to initialize BalancerData.Data. Init func(*BalancerData) + // ParseConfig is used for parsing LB configs, if specified. + ParseConfig func(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) UpdateClientConnState func(*BalancerData, balancer.ClientConnState) error ResolverError func(*BalancerData, error) UpdateSubConnState func(*BalancerData, balancer.SubConn, balancer.SubConnState) Close func(*BalancerData) + ExitIdle func(*BalancerData) } // BalancerData contains data relevant to a stub balancer. @@ -75,6 +83,12 @@ func (b *bal) Close() { } } +func (b *bal) ExitIdle() { + if b.bf.ExitIdle != nil { + b.bf.ExitIdle(b.bd) + } +} + type bb struct { name string bf BalancerFuncs @@ -90,6 +104,13 @@ func (bb bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. func (bb bb) Name() string { return bb.name } +func (bb bb) ParseConfig(lbCfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + if bb.bf.ParseConfig != nil { + return bb.bf.ParseConfig(lbCfg) + } + return nil, nil +} + // Register registers a stub balancer builder which will call the provided // functions. The name used should be unique. 
func Register(name string, bf BalancerFuncs) { diff --git a/xds/internal/balancer/balancergroup/balancergroup.go b/internal/balancergroup/balancergroup.go similarity index 76% rename from xds/internal/balancer/balancergroup/balancergroup.go rename to internal/balancergroup/balancergroup.go index 2ec576a4b572..c1f7e75c3ec8 100644 --- a/xds/internal/balancer/balancergroup/balancergroup.go +++ b/internal/balancergroup/balancergroup.go @@ -23,11 +23,9 @@ import ( "sync" "time" - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" - "google.golang.org/grpc/xds/internal/client/load" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/cache" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/resolver" @@ -70,7 +68,7 @@ type subBalancerWrapper struct { ccState *balancer.ClientConnState // The dynamic part of sub-balancer. Only used when balancer group is // started. Gets cleared when sub-balancer is closed. - balancer balancer.Balancer + balancer *gracefulswitch.Balancer } // UpdateState overrides balancer.ClientConn, to keep state and picker. @@ -96,14 +94,27 @@ func (sbc *subBalancerWrapper) updateBalancerStateWithCachedPicker() { } func (sbc *subBalancerWrapper) startBalancer() { - b := sbc.builder.Build(sbc, sbc.buildOpts) - sbc.group.logger.Infof("Created child policy %p of type %v", b, sbc.builder.Name()) - sbc.balancer = b + if sbc.balancer == nil { + sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts) + } + sbc.group.logger.Infof("Creating child policy of type %q for locality %q", sbc.builder.Name(), sbc.id) + sbc.balancer.SwitchTo(sbc.builder) if sbc.ccState != nil { - b.UpdateClientConnState(*sbc.ccState) + sbc.balancer.UpdateClientConnState(*sbc.ccState) } } +// exitIdle invokes the sub-balancer's ExitIdle method. Returns a boolean +// indicating whether or not the operation was completed. 
+func (sbc *subBalancerWrapper) exitIdle() (complete bool) { + b := sbc.balancer + if b == nil { + return true + } + b.ExitIdle() + return true +} + func (sbc *subBalancerWrapper) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { b := sbc.balancer if b == nil { @@ -149,7 +160,24 @@ func (sbc *subBalancerWrapper) resolverError(err error) { b.ResolverError(err) } +func (sbc *subBalancerWrapper) gracefulSwitch(builder balancer.Builder) { + sbc.builder = builder + b := sbc.balancer + // Even if you get an add and it persists builder but doesn't start + // balancer, this would leave graceful switch being nil, in which we are + // correctly overwriting with the recent builder here as well to use later. + // The graceful switch balancer's presence is an invariant of whether the + // balancer group is closed or not (if closed, nil, if started, present). + if sbc.balancer != nil { + sbc.group.logger.Infof("Switching child policy %v to type %v", sbc.id, sbc.builder.Name()) + b.SwitchTo(sbc.builder) + } +} + func (sbc *subBalancerWrapper) stopBalancer() { + if sbc.balancer == nil { + return + } sbc.balancer.Close() sbc.balancer = nil } @@ -160,20 +188,19 @@ func (sbc *subBalancerWrapper) stopBalancer() { // intended to be used directly as a balancer. It's expected to be used as a // sub-balancer manager by a high level balancer. 
// -// Updates from ClientConn are forwarded to sub-balancers -// - service config update -// - Not implemented -// - address update -// - subConn state change -// - find the corresponding balancer and forward +// Updates from ClientConn are forwarded to sub-balancers +// - service config update +// - address update +// - subConn state change +// - find the corresponding balancer and forward // -// Actions from sub-balances are forwarded to parent ClientConn -// - new/remove SubConn -// - picker update and health states change -// - sub-pickers are sent to an aggregator provided by the parent, which -// will group them into a group-picker. The aggregated connectivity state is -// also handled by the aggregator. -// - resolveNow +// Actions from sub-balances are forwarded to parent ClientConn +// - new/remove SubConn +// - picker update and health states change +// - sub-pickers are sent to an aggregator provided by the parent, which +// will group them into a group-picker. The aggregated connectivity state is +// also handled by the aggregator. +// - resolveNow // // Sub-balancers are only built when the balancer group is started. If the // balancer group is closed, the sub-balancers are also closed. And it's @@ -183,7 +210,6 @@ type BalancerGroup struct { cc balancer.ClientConn buildOpts balancer.BuildOptions logger *grpclog.PrefixLogger - loadStore load.PerClusterReporter // stateAggregator is where the state/picker updates will be sent to. It's // provided by the parent balancer, to build a picker with all the @@ -238,15 +264,11 @@ var DefaultSubBalancerCloseTimeout = 15 * time.Minute // New creates a new BalancerGroup. Note that the BalancerGroup // needs to be started to work. -// -// TODO(easwars): Pass an options struct instead of N args. 
-func New(cc balancer.ClientConn, bOpts balancer.BuildOptions, stateAggregator BalancerStateAggregator, loadStore load.PerClusterReporter, logger *grpclog.PrefixLogger) *BalancerGroup {
+func New(cc balancer.ClientConn, bOpts balancer.BuildOptions, stateAggregator BalancerStateAggregator, logger *grpclog.PrefixLogger) *BalancerGroup {
 	return &BalancerGroup{
-		cc:        cc,
-		buildOpts: bOpts,
-		logger:    logger,
-		loadStore: loadStore,
-
+		cc:              cc,
+		buildOpts:       bOpts,
+		logger:          logger,
 		stateAggregator: stateAggregator,
 
 		idToBalancerConfig: make(map[string]*subBalancerWrapper),
@@ -279,10 +301,22 @@ func (bg *BalancerGroup) Start() {
 	bg.outgoingMu.Unlock()
 }
 
-// Add adds a balancer built by builder to the group, with given id.
-func (bg *BalancerGroup) Add(id string, builder balancer.Builder) {
+// AddWithClientConn adds a balancer with the given id to the group. The
+// balancer is built with a balancer builder registered with balancerName. The
+// given ClientConn is passed to the newly built balancer instead of the
+// one passed to balancergroup.New().
+//
+// TODO: Get rid of the existing Add() API and replace it with this.
+func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error {
+	bg.logger.Infof("Adding child policy of type %q for locality %q", balancerName, id)
+	builder := balancer.Get(balancerName)
+	if builder == nil {
+		return fmt.Errorf("unregistered balancer name %q", balancerName)
+	}
+
 	// Store data in static map, and then check to see if bg is started.
 	bg.outgoingMu.Lock()
+	defer bg.outgoingMu.Unlock()
 	var sbc *subBalancerWrapper
 	// If outgoingStarted is true, search in the cache. Otherwise, cache is
 	// guaranteed to be empty, searching is unnecessary.
@@ -307,7 +341,7 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { } if sbc == nil { sbc = &subBalancerWrapper{ - ClientConn: bg.cc, + ClientConn: cc, id: id, group: bg, builder: builder, @@ -324,6 +358,30 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { sbc.updateBalancerStateWithCachedPicker() } bg.idToBalancerConfig[id] = sbc + return nil +} + +// Add adds a balancer built by builder to the group, with given id. +func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { + bg.AddWithClientConn(id, builder.Name(), bg.cc) +} + +// UpdateBuilder updates the builder for a current child, starting the Graceful +// Switch process for that child. +// +// TODO: update this API to take the name of the new builder instead. +func (bg *BalancerGroup) UpdateBuilder(id string, builder balancer.Builder) { + bg.outgoingMu.Lock() + // This does not deal with the balancer cache because this call should come + // after an Add call for a given child balancer. If the child is removed, + // the caller will call Add if the child balancer comes back which would + // then deal with the balancer cache. + sbc := bg.idToBalancerConfig[id] + if sbc == nil { + // simply ignore it if not present, don't error + return + } + sbc.gracefulSwitch(builder) bg.outgoingMu.Unlock() } @@ -333,17 +391,20 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { // closed after timeout. Cleanup work (closing sub-balancer and removing // subconns) will be done after timeout. func (bg *BalancerGroup) Remove(id string) { + bg.logger.Infof("Removing child policy for locality %q", id) bg.outgoingMu.Lock() if sbToRemove, ok := bg.idToBalancerConfig[id]; ok { if bg.outgoingStarted { bg.balancerCache.Add(id, sbToRemove, func() { - // After timeout, when sub-balancer is removed from cache, need - // to close the underlying sub-balancer, and remove all its - // subconns. 
+ // A sub-balancer evicted from the timeout cache needs to closed + // and its subConns need to removed, unconditionally. There is a + // possibility that a sub-balancer might be removed (thereby + // moving it to the cache) around the same time that the + // balancergroup is closed, and by the time we get here the + // balancergroup might be closed. Check for `outgoingStarted == + // true` at that point can lead to a leaked sub-balancer. bg.outgoingMu.Lock() - if bg.outgoingStarted { - sbToRemove.stopBalancer() - } + sbToRemove.stopBalancer() bg.outgoingMu.Unlock() bg.cleanupSubConns(sbToRemove) }) @@ -369,13 +430,23 @@ func (bg *BalancerGroup) cleanupSubConns(config *subBalancerWrapper) { // sub-balancers. for sc, b := range bg.scToSubBalancer { if b == config { - bg.cc.RemoveSubConn(sc) delete(bg.scToSubBalancer, sc) } } bg.incomingMu.Unlock() } +// connect attempts to connect to all subConns belonging to sb. +func (bg *BalancerGroup) connect(sb *subBalancerWrapper) { + bg.incomingMu.Lock() + for sc, b := range bg.scToSubBalancer { + if b == sb { + sc.Connect() + } + } + bg.incomingMu.Unlock() +} + // Following are actions from the parent grpc.ClientConn, forward to sub-balancers. // UpdateSubConnState handles the state for the subconn. It finds the @@ -451,10 +522,6 @@ func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver // state, then forward to ClientConn. func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) { bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state) - if bg.loadStore != nil { - // Only wrap the picker to do load reporting if loadStore was set. - state.Picker = newLoadReportPicker(state.Picker, id, bg.loadStore) - } // Send new state to the aggregator, without holding the incomingMu. 
// incomingMu is to protect all calls to the parent ClientConn, this update @@ -479,6 +546,10 @@ func (bg *BalancerGroup) Close() { } bg.incomingMu.Unlock() + // Clear(true) runs clear function to close sub-balancers in cache. It + // must be called out of outgoing mutex. + bg.balancerCache.Clear(true) + bg.outgoingMu.Lock() if bg.outgoingStarted { bg.outgoingStarted = false @@ -487,57 +558,29 @@ func (bg *BalancerGroup) Close() { } } bg.outgoingMu.Unlock() - // Clear(true) runs clear function to close sub-balancers in cache. It - // must be called out of outgoing mutex. - bg.balancerCache.Clear(true) } -const ( - serverLoadCPUName = "cpu_utilization" - serverLoadMemoryName = "mem_utilization" -) - -type loadReportPicker struct { - p balancer.Picker - - locality string - loadStore load.PerClusterReporter -} - -func newLoadReportPicker(p balancer.Picker, id string, loadStore load.PerClusterReporter) *loadReportPicker { - return &loadReportPicker{ - p: p, - locality: id, - loadStore: loadStore, +// ExitIdle should be invoked when the parent LB policy's ExitIdle is invoked. +// It will trigger this on all sub-balancers, or reconnect their subconns if +// not supported. 
+func (bg *BalancerGroup) ExitIdle() { + bg.outgoingMu.Lock() + for _, config := range bg.idToBalancerConfig { + if !config.exitIdle() { + bg.connect(config) + } } + bg.outgoingMu.Unlock() } -func (lrp *loadReportPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - res, err := lrp.p.Pick(info) - if err != nil { - return res, err - } - - lrp.loadStore.CallStarted(lrp.locality) - oldDone := res.Done - res.Done = func(info balancer.DoneInfo) { - if oldDone != nil { - oldDone(info) - } - lrp.loadStore.CallFinished(lrp.locality, info.Err) - - load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport) - if !ok { - return - } - lrp.loadStore.CallServerLoad(lrp.locality, serverLoadCPUName, load.CpuUtilization) - lrp.loadStore.CallServerLoad(lrp.locality, serverLoadMemoryName, load.MemUtilization) - for n, d := range load.RequestCost { - lrp.loadStore.CallServerLoad(lrp.locality, n, d) - } - for n, d := range load.Utilization { - lrp.loadStore.CallServerLoad(lrp.locality, n, d) +// ExitIdleOne instructs the sub-balancer `id` to exit IDLE state, if +// appropriate and possible. +func (bg *BalancerGroup) ExitIdleOne(id string) { + bg.outgoingMu.Lock() + if config := bg.idToBalancerConfig[id]; config != nil { + if !config.exitIdle() { + bg.connect(config) } } - return res, err + bg.outgoingMu.Unlock() } diff --git a/internal/balancergroup/balancergroup_test.go b/internal/balancergroup/balancergroup_test.go new file mode 100644 index 000000000000..90c5c20f4158 --- /dev/null +++ b/internal/balancergroup/balancergroup_test.go @@ -0,0 +1,679 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package balancergroup + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedtarget/weightedaggregator" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" +) + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + +var ( + rrBuilder = balancer.Get(roundrobin.Name) + testBalancerIDs = []string{"b1", "b2", "b3"} + testBackendAddrs []resolver.Address +) + +const testBackendAddrsCount = 12 + +func init() { + for i := 0; i < testBackendAddrsCount; i++ { + testBackendAddrs = append(testBackendAddrs, resolver.Address{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)}) + } + + // Disable caching for all tests. It will be re-enabled in caching specific + // tests. + DefaultSubBalancerCloseTimeout = time.Millisecond +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// Create a new balancer group, add balancer and backends, but not start. +// - b1, weight 2, backends [0,1] +// - b2, weight 1, backends [2,3] +// Start the balancer group and check behavior. 
+// +// Close the balancer group, call add/remove/change weight/change address. +// - b2, weight 3, backends [0,3] +// - b3, weight 1, backends [1,2] +// Start the balancer group again and check for behavior. +func (s) TestBalancerGroup_start_close(t *testing.T) { + cc := testutils.NewTestClientConn(t) + gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) + gator.Start() + bg := New(cc, balancer.BuildOptions{}, gator, nil) + + // Add two balancers to group and send two resolved addresses to both + // balancers. + gator.Add(testBalancerIDs[0], 2) + bg.Add(testBalancerIDs[0], rrBuilder) + bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) + gator.Add(testBalancerIDs[1], 1) + bg.Add(testBalancerIDs[1], rrBuilder) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) + + bg.Start() + + m1 := make(map[resolver.Address]balancer.SubConn) + for i := 0; i < 4; i++ { + addrs := <-cc.NewSubConnAddrsCh + sc := <-cc.NewSubConnCh + m1[addrs[0]] = sc + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + } + + // Test roundrobin on the last picker. + p1 := <-cc.NewPickerCh + want := []balancer.SubConn{ + m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], + m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], + m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], + } + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + gator.Stop() + bg.Close() + for i := 0; i < 4; i++ { + bg.UpdateSubConnState(<-cc.RemoveSubConnCh, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) + } + + // Add b3, weight 1, backends [1,2]. 
+ gator.Add(testBalancerIDs[2], 1) + bg.Add(testBalancerIDs[2], rrBuilder) + bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:3]}}) + + // Remove b1. + gator.Remove(testBalancerIDs[0]) + bg.Remove(testBalancerIDs[0]) + + // Update b2 to weight 3, backends [0,3]. + gator.UpdateWeight(testBalancerIDs[1], 3) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: append([]resolver.Address(nil), testBackendAddrs[0], testBackendAddrs[3])}}) + + gator.Start() + bg.Start() + + m2 := make(map[resolver.Address]balancer.SubConn) + for i := 0; i < 4; i++ { + addrs := <-cc.NewSubConnAddrsCh + sc := <-cc.NewSubConnCh + m2[addrs[0]] = sc + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + } + + // Test roundrobin on the last picker. + p2 := <-cc.NewPickerCh + want = []balancer.SubConn{ + m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], + m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], + m2[testBackendAddrs[1]], m2[testBackendAddrs[2]], + } + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p2)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } +} + +// Test that balancer group start() doesn't deadlock if the balancer calls back +// into balancer group inline when it gets an update. +// +// The potential deadlock can happen if we +// - hold a lock and send updates to balancer (e.g. update resolved addresses) +// - the balancer calls back (NewSubConn or update picker) in line +// +// The callback will try to hold the same lock again, which will cause a +// deadlock. +// +// This test starts the balancer group with a test balancer, will update picker +// whenever it gets an address update.
It's expected that start() doesn't block +// because of deadlock. +func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) { + const balancerName = "stub-TestBalancerGroup_start_close_deadlock" + stub.Register(balancerName, stub.BalancerFuncs{}) + builder := balancer.Get(balancerName) + + cc := testutils.NewTestClientConn(t) + gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) + gator.Start() + bg := New(cc, balancer.BuildOptions{}, gator, nil) + + gator.Add(testBalancerIDs[0], 2) + bg.Add(testBalancerIDs[0], builder) + bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) + gator.Add(testBalancerIDs[1], 1) + bg.Add(testBalancerIDs[1], builder) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) + + bg.Start() +} + +func replaceDefaultSubBalancerCloseTimeout(n time.Duration) func() { + old := DefaultSubBalancerCloseTimeout + DefaultSubBalancerCloseTimeout = n + return func() { DefaultSubBalancerCloseTimeout = old } +} + +// initBalancerGroupForCachingTest creates a balancer group, and initialize it +// to be ready for caching tests. +// +// Two rr balancers are added to bg, each with 2 ready subConns. A sub-balancer +// is removed later, so the balancer group returned has one sub-balancer in its +// own map, and one sub-balancer in cache. +func initBalancerGroupForCachingTest(t *testing.T) (*weightedaggregator.Aggregator, *BalancerGroup, *testutils.TestClientConn, map[resolver.Address]balancer.SubConn) { + cc := testutils.NewTestClientConn(t) + gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) + gator.Start() + bg := New(cc, balancer.BuildOptions{}, gator, nil) + + // Add two balancers to group and send two resolved addresses to both + // balancers. 
+ gator.Add(testBalancerIDs[0], 2) + bg.Add(testBalancerIDs[0], rrBuilder) + bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) + gator.Add(testBalancerIDs[1], 1) + bg.Add(testBalancerIDs[1], rrBuilder) + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) + + bg.Start() + + m1 := make(map[resolver.Address]balancer.SubConn) + for i := 0; i < 4; i++ { + addrs := <-cc.NewSubConnAddrsCh + sc := <-cc.NewSubConnCh + m1[addrs[0]] = sc + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + } + + // Test roundrobin on the last picker. + p1 := <-cc.NewPickerCh + want := []balancer.SubConn{ + m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], + m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], + m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], + } + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + gator.Remove(testBalancerIDs[1]) + bg.Remove(testBalancerIDs[1]) + // Don't wait for SubConns to be removed after close, because they are only + // removed after close timeout. + for i := 0; i < 10; i++ { + select { + case <-cc.RemoveSubConnCh: + t.Fatalf("Got request to remove subconn, want no remove subconn (because subconns were still in cache)") + default: + } + time.Sleep(time.Millisecond) + } + // Test roundrobin with only sub-balancer0.
+ p2 := <-cc.NewPickerCh + want = []balancer.SubConn{ + m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], + } + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p2)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + return gator, bg, cc, m1 +} + +// Test that if a sub-balancer is removed, and re-added within close timeout, +// the subConns won't be re-created. +func (s) TestBalancerGroup_locality_caching(t *testing.T) { + defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() + gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t) + + // Turn down subconn for addr2, shouldn't get picker update because + // sub-balancer1 was removed. + bg.UpdateSubConnState(addrToSC[testBackendAddrs[2]], balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + for i := 0; i < 10; i++ { + select { + case <-cc.NewPickerCh: + t.Fatalf("Got new picker, want no new picker (because the sub-balancer was removed)") + default: + } + time.Sleep(time.Millisecond) + } + + // Sleep, but sleep less then close timeout. + time.Sleep(time.Millisecond * 100) + + // Re-add sub-balancer-1, because subconns were in cache, no new subconns + // should be created. But a new picker will still be generated, with subconn + // states update to date. + gator.Add(testBalancerIDs[1], 1) + bg.Add(testBalancerIDs[1], rrBuilder) + + p3 := <-cc.NewPickerCh + want := []balancer.SubConn{ + addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], + addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], + // addr2 is down, b2 only has addr3 in READY state. 
+ addrToSC[testBackendAddrs[3]], addrToSC[testBackendAddrs[3]], + } + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p3)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + for i := 0; i < 10; i++ { + select { + case <-cc.NewSubConnAddrsCh: + t.Fatalf("Got new subconn, want no new subconn (because subconns were still in cache)") + default: + } + time.Sleep(time.Millisecond * 10) + } +} + +// Sub-balancers are put in cache when they are removed. If balancer group is +// closed within close timeout, all subconns should still be removed +// immediately. +func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) { + defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() + _, bg, cc, addrToSC := initBalancerGroupForCachingTest(t) + + bg.Close() + // The balancer group is closed. The subconns should be removed immediately. + removeTimeout := time.After(time.Millisecond * 500) + scToRemove := map[balancer.SubConn]int{ + addrToSC[testBackendAddrs[0]]: 1, + addrToSC[testBackendAddrs[1]]: 1, + addrToSC[testBackendAddrs[2]]: 1, + addrToSC[testBackendAddrs[3]]: 1, + } + for i := 0; i < len(scToRemove); i++ { + select { + case sc := <-cc.RemoveSubConnCh: + c := scToRemove[sc] + if c == 0 { + t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) + } + scToRemove[sc] = c - 1 + case <-removeTimeout: + t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") + } + } +} + +// Sub-balancers in cache will be closed if not re-added within timeout, and +// subConns will be removed. +func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) { + defer replaceDefaultSubBalancerCloseTimeout(time.Second)() + _, _, cc, addrToSC := initBalancerGroupForCachingTest(t) + + // The sub-balancer is not re-added within timeout. The subconns should be + // removed.
+ removeTimeout := time.After(DefaultSubBalancerCloseTimeout) + scToRemove := map[balancer.SubConn]int{ + addrToSC[testBackendAddrs[2]]: 1, + addrToSC[testBackendAddrs[3]]: 1, + } + for i := 0; i < len(scToRemove); i++ { + select { + case sc := <-cc.RemoveSubConnCh: + c := scToRemove[sc] + if c == 0 { + t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) + } + scToRemove[sc] = c - 1 + case <-removeTimeout: + t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") + } + } +} + +// Wrap the rr builder, so it behaves the same, but has a different name. +type noopBalancerBuilderWrapper struct { + balancer.Builder +} + +func init() { + balancer.Register(&noopBalancerBuilderWrapper{Builder: rrBuilder}) +} + +func (*noopBalancerBuilderWrapper) Name() string { + return "noopBalancerBuilderWrapper" +} + +// After removing a sub-balancer, re-add with same ID, but different balancer +// builder. Old subconns should be removed, and new subconns should be created. +func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) { + defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() + gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t) + + // Re-add sub-balancer-1, but with a different balancer builder. The + // sub-balancer was still in cache, but can't be reused. This should cause + // old sub-balancer's subconns to be removed immediately, and new subconns + // to be created. + gator.Add(testBalancerIDs[1], 1) + bg.Add(testBalancerIDs[1], &noopBalancerBuilderWrapper{rrBuilder}) + + // The cached sub-balancer should be closed, and the subconns should be + // removed immediately.
+ removeTimeout := time.After(time.Millisecond * 500) + scToRemove := map[balancer.SubConn]int{ + addrToSC[testBackendAddrs[2]]: 1, + addrToSC[testBackendAddrs[3]]: 1, + } + for i := 0; i < len(scToRemove); i++ { + select { + case sc := <-cc.RemoveSubConnCh: + c := scToRemove[sc] + if c == 0 { + t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) + } + scToRemove[sc] = c - 1 + case <-removeTimeout: + t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") + } + } + + bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[4:6]}}) + + newSCTimeout := time.After(time.Millisecond * 500) + scToAdd := map[resolver.Address]int{ + testBackendAddrs[4]: 1, + testBackendAddrs[5]: 1, + } + for i := 0; i < len(scToAdd); i++ { + select { + case addr := <-cc.NewSubConnAddrsCh: + c := scToAdd[addr[0]] + if c == 0 { + t.Fatalf("Got newSubConn for %v when there's %d new expected", addr, c) + } + scToAdd[addr[0]] = c - 1 + sc := <-cc.NewSubConnCh + addrToSC[addr[0]] = sc + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + case <-newSCTimeout: + t.Fatalf("timeout waiting for subConns (from new sub-balancer) to be newed") + } + } + + // Test roundrobin on the new picker. + p3 := <-cc.NewPickerCh + want := []balancer.SubConn{ + addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], + addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], + addrToSC[testBackendAddrs[4]], addrToSC[testBackendAddrs[5]], + } + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p3)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } +} + +// After removing a sub-balancer, it will be kept in cache. Make sure that this +// sub-balancer's Close is called when the balancer group is closed. 
+func (s) TestBalancerGroup_CloseStopsBalancerInCache(t *testing.T) { + const balancerName = "stub-TestBalancerGroup_check_close" + closed := make(chan struct{}) + stub.Register(balancerName, stub.BalancerFuncs{Close: func(_ *stub.BalancerData) { + close(closed) + }}) + builder := balancer.Get(balancerName) + + defer replaceDefaultSubBalancerCloseTimeout(time.Second)() + gator, bg, _, _ := initBalancerGroupForCachingTest(t) + + // Add balancer, and remove + gator.Add(testBalancerIDs[2], 1) + bg.Add(testBalancerIDs[2], builder) + gator.Remove(testBalancerIDs[2]) + bg.Remove(testBalancerIDs[2]) + + // Immediately close balancergroup, before the cache timeout. + bg.Close() + + // Make sure the removed child balancer is closed eventually. + select { + case <-closed: + case <-time.After(time.Second * 2): + t.Fatalf("timeout waiting for the child balancer in cache to be closed") + } +} + +// TestBalancerGroupBuildOptions verifies that the balancer.BuildOptions passed +// to the balancergroup at creation time is passed to child policies. +func (s) TestBalancerGroupBuildOptions(t *testing.T) { + const ( + balancerName = "stubBalancer-TestBalancerGroupBuildOptions" + userAgent = "ua" + ) + + // Setup the stub balancer such that we can read the build options passed to + // it in the UpdateClientConnState method. + bOpts := balancer.BuildOptions{ + DialCreds: insecure.NewCredentials(), + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefChannel, 1234, nil), + CustomUserAgent: userAgent, + } + stub.Register(balancerName, stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + if !cmp.Equal(bd.BuildOptions, bOpts) { + return fmt.Errorf("buildOptions in child balancer: %v, want %v", bd, bOpts) + } + return nil + }, + }) + cc := testutils.NewTestClientConn(t) + bg := New(cc, bOpts, nil, nil) + bg.Start() + + // Add the stub balancer build above as a child policy. 
+ balancerBuilder := balancer.Get(balancerName) + bg.Add(testBalancerIDs[0], balancerBuilder) + + // Send an empty clientConn state change. This should trigger the + // verification of the buildOptions being passed to the child policy. + if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{}); err != nil { + t.Fatal(err) + } +} + +func (s) TestBalancerExitIdleOne(t *testing.T) { + const balancerName = "stub-balancer-test-balancergroup-exit-idle-one" + exitIdleCh := make(chan struct{}, 1) + stub.Register(balancerName, stub.BalancerFuncs{ + ExitIdle: func(*stub.BalancerData) { + exitIdleCh <- struct{}{} + }, + }) + cc := testutils.NewTestClientConn(t) + bg := New(cc, balancer.BuildOptions{}, nil, nil) + bg.Start() + defer bg.Close() + + // Add the stub balancer build above as a child policy. + builder := balancer.Get(balancerName) + bg.Add(testBalancerIDs[0], builder) + + // Call ExitIdle on the child policy. + bg.ExitIdleOne(testBalancerIDs[0]) + select { + case <-time.After(time.Second): + t.Fatal("Timeout when waiting for ExitIdle to be invoked on child policy") + case <-exitIdleCh: + } +} + +// TestBalancerGracefulSwitch tests the graceful switch functionality for a +// child of the balancer group. At first, the child is configured as a round +// robin load balancer, and thus should behave accordingly. The test then +// gracefully switches this child to a custom type which only creates a SubConn +// for the second passed in address and also only picks that created SubConn. +// The new aggregated picker should reflect this change for the child. 
+func (s) TestBalancerGracefulSwitch(t *testing.T) { + cc := testutils.NewTestClientConn(t) + gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) + gator.Start() + bg := New(cc, balancer.BuildOptions{}, gator, nil) + gator.Add(testBalancerIDs[0], 1) + bg.Add(testBalancerIDs[0], rrBuilder) + bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) + + bg.Start() + + m1 := make(map[resolver.Address]balancer.SubConn) + scs := make(map[balancer.SubConn]bool) + for i := 0; i < 2; i++ { + addrs := <-cc.NewSubConnAddrsCh + sc := <-cc.NewSubConnCh + m1[addrs[0]] = sc + scs[sc] = true + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + } + + p1 := <-cc.NewPickerCh + want := []balancer.SubConn{ + m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], + } + if err := testutils.IsRoundRobin(want, testutils.SubConnFromPicker(p1)); err != nil { + t.Fatalf("want %v, got %v", want, err) + } + + // The balancer type for testBalancersIDs[0] is currently Round Robin. Now, + // change it to a balancer that has separate behavior logically (creating + // SubConn for second address in address list and always picking that + // SubConn), and see if the downstream behavior reflects that change. + bg.UpdateBuilder(testBalancerIDs[0], wrappedPickFirstBalancerBuilder{}) + if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}); err != nil { + t.Fatalf("error updating ClientConn state: %v", err) + } + + addrs := <-cc.NewSubConnAddrsCh + if addrs[0].Addr != testBackendAddrs[3].Addr { + // Verifies forwarded to new created balancer, as the wrapped pick first + // balancer will delete first address. 
+ t.Fatalf("newSubConn called with wrong address, want: %v, got : %v", testBackendAddrs[3].Addr, addrs[0].Addr) + } + sc := <-cc.NewSubConnCh + + // Update the pick first balancers SubConn as CONNECTING. This will cause + // the pick first balancer to UpdateState() with CONNECTING, which shouldn't send + // a Picker update back, as the Graceful Switch process is not complete. + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-cc.NewPickerCh: + t.Fatalf("No new picker should have been sent due to the Graceful Switch process not completing") + case <-ctx.Done(): + } + + // Update the pick first balancers SubConn as READY. This will cause + // the pick first balancer to UpdateState() with READY, which should send a + // Picker update back, as the Graceful Switch process is complete. This + // Picker should always pick the pick first's created SubConn which + // corresponds to address 3. + bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p2 := <-cc.NewPickerCh + pr, err := p2.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("error picking: %v", err) + } + if pr.SubConn != sc { + t.Fatalf("picker.Pick(), want %v, got %v", sc, pr.SubConn) + } + + // The Graceful Switch process completing for the child should cause the + // SubConns for the balancer being gracefully switched from to get deleted. + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for i := 0; i < 2; i++ { + select { + case <-ctx.Done(): + t.Fatalf("error waiting for RemoveSubConn()") + case sc := <-cc.RemoveSubConnCh: + // The SubConn removed should have been one of the two created + // SubConns, and both should be deleted. 
+ if ok := scs[sc]; ok { + delete(scs, sc) + continue + } else { + t.Fatalf("RemoveSubConn called for wrong SubConn %v, want in %v", sc, scs) + } + } + } +} + +type wrappedPickFirstBalancerBuilder struct{} + +func (wrappedPickFirstBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(grpc.PickFirstBalancerName) + wpfb := &wrappedPickFirstBalancer{ + ClientConn: cc, + } + pf := builder.Build(wpfb, opts) + wpfb.Balancer = pf + return wpfb +} + +func (wrappedPickFirstBalancerBuilder) Name() string { + return "wrappedPickFirstBalancer" +} + +type wrappedPickFirstBalancer struct { + balancer.Balancer + balancer.ClientConn +} + +func (wb *wrappedPickFirstBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + s.ResolverState.Addresses = s.ResolverState.Addresses[1:] + return wb.Balancer.UpdateClientConnState(s) +} + +func (wb *wrappedPickFirstBalancer) UpdateState(state balancer.State) { + // Eat it if IDLE - allows it to switch over only on a READY SubConn. + if state.ConnectivityState == connectivity.Idle { + return + } + wb.ClientConn.UpdateState(state) +} diff --git a/xds/internal/balancer/balancergroup/balancerstateaggregator.go b/internal/balancergroup/balancerstateaggregator.go similarity index 93% rename from xds/internal/balancer/balancergroup/balancerstateaggregator.go rename to internal/balancergroup/balancerstateaggregator.go index 116394385059..816869555323 100644 --- a/xds/internal/balancer/balancergroup/balancerstateaggregator.go +++ b/internal/balancergroup/balancerstateaggregator.go @@ -26,7 +26,7 @@ import ( // state. // // It takes care of merging sub-picker into one picker. The picking config is -// passed directly from the the parent to the aggregator implementation (instead +// passed directly from the parent to the aggregator implementation (instead // via balancer group). type BalancerStateAggregator interface { // UpdateState updates the state of the id. 
diff --git a/internal/binarylog/binarylog.go b/internal/binarylog/binarylog.go index 5cc3aeddb213..755fdebc1b15 100644 --- a/internal/binarylog/binarylog.go +++ b/internal/binarylog/binarylog.go @@ -28,38 +28,48 @@ import ( "google.golang.org/grpc/internal/grpcutil" ) -// Logger is the global binary logger. It can be used to get binary logger for -// each method. +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be // built at init time from the configuration (environment variable or flags). // -// It is used to get a methodLogger for each individual method. +// It is used to get a MethodLogger for each individual method. var binLogger Logger -var grpclogLogger = grpclog.Component("binarylog") - -// SetLogger sets the binarg logger. +// SetLogger sets the binary logger. // // Only call this at init time. func SetLogger(l Logger) { binLogger = l } -// GetMethodLogger returns the methodLogger for the given methodName. +// GetLogger gets the binary logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + +// GetMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
-func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +78,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,83 +110,83 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // -// New methodLogger with same service overrides the old one. -func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +// New MethodLogger with same service overrides the old one. 
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // -// New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +// New MethodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". 
func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } -// getMethodLogger returns the methodLogger for the given methodName. +// getMethodLogger returns the MethodLogger for the given methodName. // // methodName should be in the format of "/service/method". // -// Each methodLogger returned by this method is a new instance. This is to +// Each MethodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
-func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return NewTruncatingMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/internal/binarylog/binarylog_test.go b/internal/binarylog/binarylog_test.go index cbf2ba0d1bf8..47f6a541e767 100644 --- a/internal/binarylog/binarylog_test.go +++ b/internal/binarylog/binarylog_test.go @@ -93,12 +93,11 @@ func (s) TestGetMethodLogger(t *testing.T) { t.Errorf("in: %q, failed to create logger from config string", tc.in) continue } - ml := l.getMethodLogger(tc.method) + ml := l.GetMethodLogger(tc.method).(*TruncatingMethodLogger) if ml == nil { t.Errorf("in: %q, method logger is nil, want non-nil", tc.in) continue } - if ml.headerMaxLen != tc.hdr || ml.messageMaxLen != tc.msg { t.Errorf("in: %q, want header: %v, message: %v, got header: %v, message: %v", tc.in, tc.hdr, tc.msg, ml.headerMaxLen, ml.messageMaxLen) } @@ -149,7 +148,7 @@ func (s) TestGetMethodLoggerOff(t *testing.T) { t.Errorf("in: %q, failed to create logger from config string", tc.in) continue } - ml := l.getMethodLogger(tc.method) + ml := l.GetMethodLogger(tc.method) if ml != nil { t.Errorf("in: %q, method logger is non-nil, want nil", tc.in) } 
diff --git a/internal/binarylog/env_config.go b/internal/binarylog/env_config.go index d8f4e7602fde..f9e80e27ab68 100644 --- a/internal/binarylog/env_config.go +++ b/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. -// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. @@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger { return l } -// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds +// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds // it to the right map in the logger. func (l *logger) fillMethodLoggerWithConfigString(config string) error { // "" is invalid. 
@@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/internal/binarylog/env_config_test.go b/internal/binarylog/env_config_test.go index f67b4fd60326..9f888ad870ea 100644 --- a/internal/binarylog/env_config_test.go +++ b/internal/binarylog/env_config_test.go @@ -36,29 +36,29 @@ func (s) TestNewLoggerFromConfigString(t *testing.T) { c := fmt.Sprintf("*{h:1;m:2},%s{h},%s{m},%s{h;m}", s1+"/*", fullM1, fullM2) l := NewLoggerFromConfigString(c).(*logger) - if l.all.hdr != 1 || l.all.msg != 2 { - t.Errorf("l.all = %#v, want headerLen: 1, messageLen: 2", l.all) + if l.config.All.Header != 1 || l.config.All.Message != 2 { + t.Errorf("l.config.All = %#v, want headerLen: 1, messageLen: 2", l.config.All) } - if ml, ok := l.services[s1]; ok { - if ml.hdr != maxUInt || ml.msg != 0 { - t.Errorf("want maxUInt header, 0 message, got header: %v, message: %v", ml.hdr, ml.msg) + if ml, ok := l.config.Services[s1]; ok { + if 
ml.Header != maxUInt || ml.Message != 0 { + t.Errorf("want maxUInt header, 0 message, got header: %v, message: %v", ml.Header, ml.Message) } } else { t.Errorf("service/* is not set") } - if ml, ok := l.methods[fullM1]; ok { - if ml.hdr != 0 || ml.msg != maxUInt { - t.Errorf("want 0 header, maxUInt message, got header: %v, message: %v", ml.hdr, ml.msg) + if ml, ok := l.config.Methods[fullM1]; ok { + if ml.Header != 0 || ml.Message != maxUInt { + t.Errorf("want 0 header, maxUInt message, got header: %v, message: %v", ml.Header, ml.Message) } } else { t.Errorf("service/method{h} is not set") } - if ml, ok := l.methods[fullM2]; ok { - if ml.hdr != maxUInt || ml.msg != maxUInt { - t.Errorf("want maxUInt header, maxUInt message, got header: %v, message: %v", ml.hdr, ml.msg) + if ml, ok := l.config.Methods[fullM2]; ok { + if ml.Header != maxUInt || ml.Message != maxUInt { + t.Errorf("want maxUInt header, maxUInt message, got header: %v, message: %v", ml.Header, ml.Message) } } else { t.Errorf("service/method{h;m} is not set") @@ -249,7 +249,7 @@ func (s) TestFillMethodLoggerWithConfigStringBlacklist(t *testing.T) { t.Errorf("returned err %v, want nil", err) continue } - _, ok := l.blacklist[tc] + _, ok := l.config.Blacklist[tc] if !ok { t.Errorf("blacklist[%q] is not set", tc) } @@ -306,15 +306,15 @@ func (s) TestFillMethodLoggerWithConfigStringGlobal(t *testing.T) { t.Errorf("returned err %v, want nil", err) continue } - if l.all == nil { - t.Errorf("l.all is not set") + if l.config.All == nil { + t.Errorf("l.config.All is not set") continue } - if hdr := l.all.hdr; hdr != tc.hdr { + if hdr := l.config.All.Header; hdr != tc.hdr { t.Errorf("header length = %v, want %v", hdr, tc.hdr) } - if msg := l.all.msg; msg != tc.msg { + if msg := l.config.All.Message; msg != tc.msg { t.Errorf("message length = %v, want %v", msg, tc.msg) } } @@ -371,16 +371,16 @@ func (s) TestFillMethodLoggerWithConfigStringPerService(t *testing.T) { t.Errorf("returned err %v, want nil", err) continue 
} - ml, ok := l.services[serviceName] + ml, ok := l.config.Services[serviceName] if !ok { t.Errorf("l.service[%q] is not set", serviceName) continue } - if hdr := ml.hdr; hdr != tc.hdr { + if hdr := ml.Header; hdr != tc.hdr { t.Errorf("header length = %v, want %v", hdr, tc.hdr) } - if msg := ml.msg; msg != tc.msg { + if msg := ml.Message; msg != tc.msg { t.Errorf("message length = %v, want %v", msg, tc.msg) } } @@ -441,16 +441,16 @@ func (s) TestFillMethodLoggerWithConfigStringPerMethod(t *testing.T) { t.Errorf("returned err %v, want nil", err) continue } - ml, ok := l.methods[fullMethodName] + ml, ok := l.config.Methods[fullMethodName] if !ok { - t.Errorf("l.methods[%q] is not set", fullMethodName) + t.Errorf("l.config.Methods[%q] is not set", fullMethodName) continue } - if hdr := ml.hdr; hdr != tc.hdr { + if hdr := ml.Header; hdr != tc.hdr { t.Errorf("header length = %v, want %v", hdr, tc.hdr) } - if msg := ml.msg; msg != tc.msg { + if msg := ml.Message; msg != tc.msg { t.Errorf("message length = %v, want %v", msg, tc.msg) } } diff --git a/internal/binarylog/method_logger.go b/internal/binarylog/method_logger.go index 0cdb41831509..6c3f632215fd 100644 --- a/internal/binarylog/method_logger.go +++ b/internal/binarylog/method_logger.go @@ -19,6 +19,7 @@ package binarylog import ( + "context" "net" "strings" "sync/atomic" @@ -26,7 +27,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -48,7 +49,16 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. 
+type MethodLogger interface { + Log(context.Context, LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +67,12 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. } -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +// NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { + return &TruncatingMethodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +83,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal-only method for building the proto message out of the +// input event. It's made public to enable other libraries to reuse as much logic +// in TruncatingMethodLogger as possible.
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -78,18 +94,22 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: + case *binlogpb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: + case *binlogpb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: + case *binlogpb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -108,7 +128,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { // but not counted towards the size limit. 
continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } @@ -119,7 +139,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -131,8 +151,11 @@ func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry + toProto() *binlogpb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -146,10 +169,10 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. 
- clientHeader := &pb.ClientHeader{ + clientHeader := &binlogpb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, @@ -157,16 +180,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry { if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -182,19 +205,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -210,7 +233,7 @@ type ClientMessage struct { Message interface{} } -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -225,19 +248,19 @@ func (c *ClientMessage) toProto() 
*pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -250,7 +273,7 @@ type ServerMessage struct { Message interface{} } -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -265,19 +288,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -287,15 +310,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: 
binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -311,7 +334,7 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclogLogger.Info("binarylogging: error in trailer is not a status error") @@ -327,10 +350,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -339,9 +362,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -354,15 +377,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { 
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -379,15 +402,15 @@ func metadataKeyOmit(key string) bool { return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ + &binlogpb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -397,26 +420,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata { return ret } -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 + ret.Type = binlogpb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 + ret.Type = binlogpb.Address_TYPE_IPV6 } else { - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN // Do not set address and port fields. 
break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX + ret.Type = binlogpb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN } return ret } diff --git a/internal/binarylog/method_logger_test.go b/internal/binarylog/method_logger_test.go index a99360bd92df..11255bb338b4 100644 --- a/internal/binarylog/method_logger_test.go +++ b/internal/binarylog/method_logger_test.go @@ -20,21 +20,22 @@ package binarylog import ( "bytes" + "context" "fmt" "net" "testing" "time" "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/ptypes/duration" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" ) func (s) TestLog(t *testing.T) { idGen.reset() - ml := newMethodLogger(10, 10) + ml := NewTruncatingMethodLogger(10, 10) // Set sink to testing buffer. 
buf := bytes.NewBuffer(nil) ml.sink = newWriterSink(buf) @@ -46,7 +47,7 @@ func (s) TestLog(t *testing.T) { port6 := 796 tcpAddr6, _ := net.ResolveTCPAddr("tcp", fmt.Sprintf("[%v]:%d", addr6, port6)) - testProtoMsg := &pb.Message{ + testProtoMsg := &binlogpb.Message{ Length: 1, Data: []byte{'a'}, } @@ -54,7 +55,7 @@ func (s) TestLog(t *testing.T) { testCases := []struct { config LogEntryConfig - want *pb.GrpcLogEntry + want *binlogpb.GrpcLogEntry }{ { config: &ClientHeader{ @@ -67,31 +68,31 @@ func (s) TestLog(t *testing.T) { Timeout: 2*time.Second + 3*time.Nanosecond, PeerAddr: tcpAddr, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Logger: pb.GrpcLogEntry_LOGGER_SERVER, - Payload: &pb.GrpcLogEntry_ClientHeader{ - ClientHeader: &pb.ClientHeader{ - Metadata: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Logger: binlogpb.GrpcLogEntry_LOGGER_SERVER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: &binlogpb.ClientHeader{ + Metadata: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, }, MethodName: "testservice/testmethod", Authority: "test.service.io", - Timeout: &dpb.Duration{ + Timeout: &durationpb.Duration{ Seconds: 2, Nanos: 3, }, }, }, PayloadTruncated: false, - Peer: &pb.Address{ - Type: pb.Address_TYPE_IPV4, + Peer: &binlogpb.Address{ + Type: binlogpb.Address_TYPE_IPV4, Address: addr, IpPort: uint32(port), }, @@ -103,15 +104,15 @@ func (s) TestLog(t *testing.T) { MethodName: "testservice/testmethod", Authority: "test.service.io", }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Logger: pb.GrpcLogEntry_LOGGER_SERVER, - Payload: &pb.GrpcLogEntry_ClientHeader{ - ClientHeader: 
&pb.ClientHeader{ - Metadata: &pb.Metadata{}, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Logger: binlogpb.GrpcLogEntry_LOGGER_SERVER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: &binlogpb.ClientHeader{ + Metadata: &binlogpb.Metadata{}, MethodName: "testservice/testmethod", Authority: "test.service.io", }, @@ -127,16 +128,16 @@ func (s) TestLog(t *testing.T) { }, PeerAddr: tcpAddr6, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ - Metadata: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ + Metadata: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, @@ -144,8 +145,8 @@ func (s) TestLog(t *testing.T) { }, }, PayloadTruncated: false, - Peer: &pb.Address{ - Type: pb.Address_TYPE_IPV6, + Peer: &binlogpb.Address{ + Type: binlogpb.Address_TYPE_IPV6, Address: addr6, IpPort: uint32(port6), }, @@ -156,14 +157,14 @@ func (s) TestLog(t *testing.T) { OnClientSide: true, Message: testProtoMsg, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(testProtoBytes)), Data: testProtoBytes, }, @@ -177,14 +178,14 @@ func (s) TestLog(t *testing.T) { OnClientSide: 
false, Message: testProtoMsg, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Logger: pb.GrpcLogEntry_LOGGER_SERVER, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Logger: binlogpb.GrpcLogEntry_LOGGER_SERVER, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(testProtoBytes)), Data: testProtoBytes, }, @@ -197,12 +198,12 @@ func (s) TestLog(t *testing.T) { config: &ClientHalfClose{ OnClientSide: false, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, - Logger: pb.GrpcLogEntry_LOGGER_SERVER, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Logger: binlogpb.GrpcLogEntry_LOGGER_SERVER, Payload: nil, PayloadTruncated: false, Peer: nil, @@ -214,23 +215,23 @@ func (s) TestLog(t *testing.T) { Err: status.Errorf(codes.Unavailable, "test"), PeerAddr: tcpAddr, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ - Metadata: &pb.Metadata{}, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ + Metadata: &binlogpb.Metadata{}, StatusCode: uint32(codes.Unavailable), StatusMessage: "test", StatusDetails: nil, }, }, PayloadTruncated: false, - Peer: &pb.Address{ - Type: pb.Address_TYPE_IPV4, + Peer: &binlogpb.Address{ + Type: binlogpb.Address_TYPE_IPV4, Address: addr, IpPort: uint32(port), }, @@ -240,15 +241,15 @@ func (s) TestLog(t *testing.T) { config: &ServerTrailer{ OnClientSide: true, }, - 
want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ - Metadata: &pb.Metadata{}, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ + Metadata: &binlogpb.Metadata{}, StatusCode: uint32(codes.OK), StatusMessage: "", StatusDetails: nil, @@ -262,12 +263,12 @@ func (s) TestLog(t *testing.T) { config: &Cancel{ OnClientSide: true, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, - Logger: pb.GrpcLogEntry_LOGGER_CLIENT, + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, Payload: nil, PayloadTruncated: false, Peer: nil, @@ -284,16 +285,16 @@ func (s) TestLog(t *testing.T) { "a": {"b", "bb"}, }, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Logger: pb.GrpcLogEntry_LOGGER_SERVER, - Payload: &pb.GrpcLogEntry_ClientHeader{ - ClientHeader: &pb.ClientHeader{ - Metadata: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Logger: binlogpb.GrpcLogEntry_LOGGER_SERVER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ + ClientHeader: &binlogpb.ClientHeader{ + Metadata: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, @@ -312,16 +313,16 @@ func (s) TestLog(t *testing.T) { "a": {"b", "bb"}, }, }, - want: &pb.GrpcLogEntry{ + want: &binlogpb.GrpcLogEntry{ Timestamp: nil, CallId: 1, SequenceIdWithinCall: 0, - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Logger: 
pb.GrpcLogEntry_LOGGER_CLIENT, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ - Metadata: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Logger: binlogpb.GrpcLogEntry_LOGGER_CLIENT, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ + Metadata: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "a", Value: []byte{'b'}}, {Key: "a", Value: []byte{'b', 'b'}}, }, @@ -335,8 +336,8 @@ func (s) TestLog(t *testing.T) { for i, tc := range testCases { buf.Reset() tc.want.SequenceIdWithinCall = uint64(i + 1) - ml.Log(tc.config) - inSink := new(pb.GrpcLogEntry) + ml.Log(context.Background(), tc.config) + inSink := new(binlogpb.GrpcLogEntry) if err := proto.Unmarshal(buf.Bytes()[4:], inSink); err != nil { t.Errorf("failed to unmarshal bytes in sink to proto: %v", err) continue @@ -350,45 +351,45 @@ func (s) TestLog(t *testing.T) { func (s) TestTruncateMetadataNotTruncated(t *testing.T) { testCases := []struct { - ml *MethodLogger - mpPb *pb.Metadata + ml *TruncatingMethodLogger + mpPb *binlogpb.Metadata }{ { - ml: newMethodLogger(maxUInt, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + ml: NewTruncatingMethodLogger(maxUInt, maxUInt), + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, }, }, }, { - ml: newMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + ml: NewTruncatingMethodLogger(2, maxUInt), + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, }, }, }, { - ml: newMethodLogger(1, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + ml: NewTruncatingMethodLogger(1, maxUInt), + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: nil}, }, }, }, { - ml: newMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + ml: NewTruncatingMethodLogger(2, maxUInt), + mpPb: 
&binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1, 1}}, }, }, }, { - ml: newMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + ml: NewTruncatingMethodLogger(2, maxUInt), + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1}}, }, @@ -397,9 +398,9 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { // "grpc-trace-bin" is kept in log but not counted towards the size // limit. { - ml: newMethodLogger(1, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + ml: NewTruncatingMethodLogger(1, maxUInt), + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "grpc-trace-bin", Value: []byte("some.trace.key")}, }, @@ -417,24 +418,24 @@ func (s) TestTruncateMetadataNotTruncated(t *testing.T) { func (s) TestTruncateMetadataTruncated(t *testing.T) { testCases := []struct { - ml *MethodLogger - mpPb *pb.Metadata + ml *TruncatingMethodLogger + mpPb *binlogpb.Metadata entryLen int }{ { - ml: newMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + ml: NewTruncatingMethodLogger(2, maxUInt), + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1, 1, 1}}, }, }, entryLen: 0, }, { - ml: newMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + ml: NewTruncatingMethodLogger(2, maxUInt), + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1}}, @@ -443,9 +444,9 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { entryLen: 2, }, { - ml: newMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + ml: NewTruncatingMethodLogger(2, maxUInt), + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1, 1}}, {Key: "", Value: []byte{1}}, }, @@ -453,9 +454,9 @@ func (s) 
TestTruncateMetadataTruncated(t *testing.T) { entryLen: 1, }, { - ml: newMethodLogger(2, maxUInt), - mpPb: &pb.Metadata{ - Entry: []*pb.MetadataEntry{ + ml: NewTruncatingMethodLogger(2, maxUInt), + mpPb: &binlogpb.Metadata{ + Entry: []*binlogpb.MetadataEntry{ {Key: "", Value: []byte{1}}, {Key: "", Value: []byte{1, 1}}, }, @@ -478,24 +479,24 @@ func (s) TestTruncateMetadataTruncated(t *testing.T) { func (s) TestTruncateMessageNotTruncated(t *testing.T) { testCases := []struct { - ml *MethodLogger - msgPb *pb.Message + ml *TruncatingMethodLogger + msgPb *binlogpb.Message }{ { - ml: newMethodLogger(maxUInt, maxUInt), - msgPb: &pb.Message{ + ml: NewTruncatingMethodLogger(maxUInt, maxUInt), + msgPb: &binlogpb.Message{ Data: []byte{1}, }, }, { - ml: newMethodLogger(maxUInt, 3), - msgPb: &pb.Message{ + ml: NewTruncatingMethodLogger(maxUInt, 3), + msgPb: &binlogpb.Message{ Data: []byte{1, 1}, }, }, { - ml: newMethodLogger(maxUInt, 2), - msgPb: &pb.Message{ + ml: NewTruncatingMethodLogger(maxUInt, 2), + msgPb: &binlogpb.Message{ Data: []byte{1, 1}, }, }, @@ -511,14 +512,14 @@ func (s) TestTruncateMessageNotTruncated(t *testing.T) { func (s) TestTruncateMessageTruncated(t *testing.T) { testCases := []struct { - ml *MethodLogger - msgPb *pb.Message + ml *TruncatingMethodLogger + msgPb *binlogpb.Message oldLength uint32 }{ { - ml: newMethodLogger(maxUInt, 2), - msgPb: &pb.Message{ + ml: NewTruncatingMethodLogger(maxUInt, 2), + msgPb: &binlogpb.Message{ Length: 3, Data: []byte{1, 1, 1}, }, diff --git a/internal/binarylog/sink.go b/internal/binarylog/sink.go index 7d7a3056b71e..264de387c2a5 100644 --- a/internal/binarylog/sink.go +++ b/internal/binarylog/sink.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ) var ( @@ -42,15 +42,15 @@ type Sink interface { // Write will be called to write the log entry into the sink. 
// // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. // @@ -66,10 +66,11 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { - grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err) + grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) + return err } hdr := make([]byte, 4) binary.BigEndian.PutUint32(hdr, uint32(len(b))) @@ -85,24 +86,27 @@ func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { func (ws *writerSink) Close() error { return nil } type bufferedSink struct { - mu sync.Mutex - closer io.Closer - out Sink // out is built on buf. - buf *bufio.Writer // buf is kept for flush. - - writeStartOnce sync.Once - writeTicker *time.Ticker + mu sync.Mutex + closer io.Closer + out Sink // out is built on buf. + buf *bufio.Writer // buf is kept for flush. + flusherStarted bool + + writeTicker *time.Ticker + done chan struct{} } -func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { - // Start the write loop when Write is called. - fs.writeStartOnce.Do(fs.startFlushGoroutine) +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { fs.mu.Lock() + defer fs.mu.Unlock() + if !fs.flusherStarted { + // Start the write loop when Write is called. 
+ fs.startFlushGoroutine() + fs.flusherStarted = true + } if err := fs.out.Write(e); err != nil { - fs.mu.Unlock() return err } - fs.mu.Unlock() return nil } @@ -113,7 +117,12 @@ const ( func (fs *bufferedSink) startFlushGoroutine() { fs.writeTicker = time.NewTicker(bufFlushDuration) go func() { - for range fs.writeTicker.C { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } fs.mu.Lock() if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) @@ -124,10 +133,12 @@ func (fs *bufferedSink) startFlushGoroutine() { } func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() if fs.writeTicker != nil { fs.writeTicker.Stop() } - fs.mu.Lock() + close(fs.done) if err := fs.buf.Flush(); err != nil { grpclogLogger.Warningf("failed to flush to Sink: %v", err) } @@ -137,7 +148,6 @@ func (fs *bufferedSink) Close() error { if err := fs.out.Close(); err != nil { grpclogLogger.Warningf("failed to close the Sink: %v", err) } - fs.mu.Unlock() return nil } @@ -155,5 +165,6 @@ func NewBufferedSink(o io.WriteCloser) Sink { closer: o, out: newWriterSink(bufW), buf: bufW, + done: make(chan struct{}), } } diff --git a/internal/buffer/unbounded.go b/internal/buffer/unbounded.go index 9f6a0c1200db..81c2f5fd761b 100644 --- a/internal/buffer/unbounded.go +++ b/internal/buffer/unbounded.go @@ -35,6 +35,7 @@ import "sync" // internal/transport/transport.go for an example of this. type Unbounded struct { c chan interface{} + closed bool mu sync.Mutex backlog []interface{} } @@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded { // Put adds t to the unbounded buffer. 
func (b *Unbounded) Put(t interface{}) { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) { // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -72,7 +79,6 @@ func (b *Unbounded) Load() { default: } } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +86,20 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed. func (b *Unbounded) Get() <-chan interface{} { return b.c } + +// Close closes the unbounded buffer. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + b.closed = true + close(b.c) +} diff --git a/internal/buffer/unbounded_test.go b/internal/buffer/unbounded_test.go index 8cb800dd0f09..1708391e7f27 100644 --- a/internal/buffer/unbounded_test.go +++ b/internal/buffer/unbounded_test.go @@ -119,3 +119,19 @@ func (s) TestMultipleWriters(t *testing.T) { t.Errorf("reads: %#v, wantReads: %#v", reads, wantReads) } } + +// TestClose closes the buffer and makes sure that nothing is sent after the +// buffer is closed. 
+func (s) TestClose(t *testing.T) { + ub := NewUnbounded() + ub.Close() + if v, ok := <-ub.Get(); ok { + t.Errorf("Unbounded.Get() = %v, want closed channel", v) + } + ub.Put(1) + ub.Load() + if v, ok := <-ub.Get(); ok { + t.Errorf("Unbounded.Get() = %v, want closed channel", v) + } + ub.Close() +} diff --git a/internal/channelz/funcs.go b/internal/channelz/funcs.go index f7314139303e..777cbcd7921d 100644 --- a/internal/channelz/funcs.go +++ b/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. 
-func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. +func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. 
+ + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. 
+// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid, ref) - } else { - db.get().addChannel(id, cn, false, pid, ref) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid, ref) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid, ref) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } -// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. 
+func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. 
@@ -326,6 +367,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c @@ -333,7 +385,7 @@ func (c *channelMap) addServer(id int64, s *server) { c.mu.Unlock() } -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { c.mu.Lock() cn.cm = c cn.trace.cm = c @@ -346,7 +398,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in c.mu.Unlock() } -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { c.mu.Lock() sc.cm = c sc.trace.cm = c @@ -355,7 +407,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri c.mu.Unlock() } -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { c.mu.Lock() ls.cm = c c.listenSockets[id] = ls @@ -363,7 +415,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref c.mu.Unlock() } -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { c.mu.Lock() ns.cm = c c.normalSockets[id] = ns @@ -630,7 +682,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) if count == 0 { end = true } - var s []*SocketMetric + s := make([]*SocketMetric, 0, len(sks)) for _, ns := 
range sks { sm := &SocketMetric{} sm.SocketData = ns.s.ChannelzMetric() diff --git a/internal/channelz/id.go b/internal/channelz/id.go new file mode 100644 index 000000000000..c9a27acd3710 --- /dev/null +++ b/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. 
+func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/internal/channelz/logging.go b/internal/channelz/logging.go index b0013f9c8865..8e13a3d2ce7b 100644 --- a/internal/channelz/logging.go +++ b/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) 
- if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/internal/channelz/types.go b/internal/channelz/types.go index 3c595d154bd3..7b2f350e2e64 100644 --- a/internal/channelz/types.go +++ b/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. 
delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/internal/channelz/types_linux.go b/internal/channelz/types_linux.go index 692dd6181778..1b1c4cce34a9 100644 --- a/internal/channelz/types_linux.go +++ b/internal/channelz/types_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. 
diff --git a/internal/channelz/types_nonlinux.go b/internal/channelz/types_nonlinux.go index 19c2fc521dcf..8b06eed1ab8b 100644 --- a/internal/channelz/types_nonlinux.go +++ b/internal/channelz/types_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -37,6 +38,6 @@ type SocketOptionData struct { // Windows OS doesn't support Socket Option func (s *SocketOptionData) Getsockopt(fd uintptr) { once.Do(func() { - logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.") + logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } diff --git a/internal/channelz/util_linux.go b/internal/channelz/util_linux.go index fdf409d55de3..8d194e44e1dc 100644 --- a/internal/channelz/util_linux.go +++ b/internal/channelz/util_linux.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2018 gRPC authors. diff --git a/internal/channelz/util_nonlinux.go b/internal/channelz/util_nonlinux.go index 8864a0811164..837ddc402400 100644 --- a/internal/channelz/util_nonlinux.go +++ b/internal/channelz/util_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * diff --git a/internal/channelz/util_test.go b/internal/channelz/util_test.go index 3d1a1183fa41..9de6679043d7 100644 --- a/internal/channelz/util_test.go +++ b/internal/channelz/util_test.go @@ -1,4 +1,5 @@ -// +build linux,!appengine +//go:build linux +// +build linux /* * diff --git a/internal/credentials/credentials.go b/internal/credentials/credentials.go new file mode 100644 index 000000000000..32c9b59033cd --- /dev/null +++ b/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/internal/credentials/spiffe.go b/internal/credentials/spiffe.go index be70b6cdfc31..25ade623058e 100644 --- a/internal/credentials/spiffe.go +++ b/internal/credentials/spiffe.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2020 gRPC authors. 
diff --git a/internal/credentials/spiffe_test.go b/internal/credentials/spiffe_test.go index 599481ad0bf9..0011ed012bbd 100644 --- a/internal/credentials/spiffe_test.go +++ b/internal/credentials/spiffe_test.go @@ -22,8 +22,8 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" - "io/ioutil" "net/url" + "os" "testing" "google.golang.org/grpc/internal/grpctest" @@ -209,9 +209,9 @@ func (s) TestSPIFFEIDFromCert(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - data, err := ioutil.ReadFile(testdata.Path(tt.dataPath)) + data, err := os.ReadFile(testdata.Path(tt.dataPath)) if err != nil { - t.Fatalf("ioutil.ReadFile(%s) failed: %v", testdata.Path(tt.dataPath), err) + t.Fatalf("os.ReadFile(%s) failed: %v", testdata.Path(tt.dataPath), err) } block, _ := pem.Decode(data) if block == nil { diff --git a/internal/credentials/syscallconn.go b/internal/credentials/syscallconn.go index f499a614c20e..2919632d657e 100644 --- a/internal/credentials/syscallconn.go +++ b/internal/credentials/syscallconn.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. diff --git a/internal/credentials/syscallconn_test.go b/internal/credentials/syscallconn_test.go index ee17a0ca67bc..b229a47d116e 100644 --- a/internal/credentials/syscallconn_test.go +++ b/internal/credentials/syscallconn_test.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. 
diff --git a/internal/credentials/util.go b/internal/credentials/util.go index 55664fa46b81..f792fd22cafc 100644 --- a/internal/credentials/util.go +++ b/internal/credentials/util.go @@ -18,7 +18,9 @@ package credentials -import "crypto/tls" +import ( + "crypto/tls" +) const alpnProtoStrH2 = "h2" diff --git a/internal/credentials/xds/handshake_info.go b/internal/credentials/xds/handshake_info.go index ca2e39edd6d1..9fa0c94f41e8 100644 --- a/internal/credentials/xds/handshake_info.go +++ b/internal/credentials/xds/handshake_info.go @@ -31,7 +31,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" - xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" ) @@ -43,10 +43,18 @@ func init() { // the Attributes field of resolver.Address. type handshakeAttrKey struct{} +// Equal reports whether the handshake info structs are identical (have the +// same pointer). This is sufficient as all subconns from one CDS balancer use +// the same one. +func (hi *HandshakeInfo) Equal(o interface{}) bool { + oh, ok := o.(*HandshakeInfo) + return ok && oh == hi +} + // SetHandshakeInfo returns a copy of addr in which the Attributes field is // updated with hInfo. func SetHandshakeInfo(addr resolver.Address, hInfo *HandshakeInfo) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(handshakeAttrKey{}, hInfo) + addr.Attributes = addr.Attributes.WithValue(handshakeAttrKey{}, hInfo) return addr } @@ -66,8 +74,8 @@ type HandshakeInfo struct { mu sync.Mutex rootProvider certprovider.Provider identityProvider certprovider.Provider - sanMatchers []xdsinternal.StringMatcher // Only on the client side. - requireClientCert bool // Only on server side. + sanMatchers []matcher.StringMatcher // Only on the client side. + requireClientCert bool // Only on server side. 
} // SetRootCertProvider updates the root certificate provider. @@ -85,7 +93,7 @@ func (hi *HandshakeInfo) SetIdentityCertProvider(identity certprovider.Provider) } // SetSANMatchers updates the list of SAN matchers. -func (hi *HandshakeInfo) SetSANMatchers(sanMatchers []xdsinternal.StringMatcher) { +func (hi *HandshakeInfo) SetSANMatchers(sanMatchers []matcher.StringMatcher) { hi.mu.Lock() hi.sanMatchers = sanMatchers hi.mu.Unlock() @@ -113,10 +121,10 @@ func (hi *HandshakeInfo) UseFallbackCreds() bool { // GetSANMatchersForTesting returns the SAN matchers stored in HandshakeInfo. // To be used only for testing purposes. -func (hi *HandshakeInfo) GetSANMatchersForTesting() []xdsinternal.StringMatcher { +func (hi *HandshakeInfo) GetSANMatchersForTesting() []matcher.StringMatcher { hi.mu.Lock() defer hi.mu.Unlock() - return append([]xdsinternal.StringMatcher{}, hi.sanMatchers...) + return append([]matcher.StringMatcher{}, hi.sanMatchers...) } // ClientSideTLSConfig constructs a tls.Config to be used in a client-side @@ -138,7 +146,10 @@ func (hi *HandshakeInfo) ClientSideTLSConfig(ctx context.Context) (*tls.Config, // Currently the Go stdlib does complete verification of the cert (which // includes hostname verification) or none. We are forced to go with the // latter and perform the normal cert validation ourselves. - cfg := &tls.Config{InsecureSkipVerify: true} + cfg := &tls.Config{ + InsecureSkipVerify: true, + NextProtos: []string{"h2"}, + } km, err := rootProv.KeyMaterial(ctx) if err != nil { @@ -159,7 +170,10 @@ func (hi *HandshakeInfo) ClientSideTLSConfig(ctx context.Context) (*tls.Config, // ServerSideTLSConfig constructs a tls.Config to be used in a server-side // handshake based on the contents of the HandshakeInfo. 
func (hi *HandshakeInfo) ServerSideTLSConfig(ctx context.Context) (*tls.Config, error) { - cfg := &tls.Config{ClientAuth: tls.NoClientCert} + cfg := &tls.Config{ + ClientAuth: tls.NoClientCert, + NextProtos: []string{"h2"}, + } hi.mu.Lock() // On the server side, identityProvider is mandatory. RootProvider is // optional based on whether the server is doing TLS or mTLS. diff --git a/internal/credentials/xds/handshake_info_test.go b/internal/credentials/xds/handshake_info_test.go index 81906fa758a1..91257a1925da 100644 --- a/internal/credentials/xds/handshake_info_test.go +++ b/internal/credentials/xds/handshake_info_test.go @@ -25,7 +25,7 @@ import ( "regexp" "testing" - xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/matcher" ) func TestDNSMatch(t *testing.T) { @@ -143,45 +143,45 @@ func TestMatchingSANExists_FailureCases(t *testing.T) { tests := []struct { desc string - sanMatchers []xdsinternal.StringMatcher + sanMatchers []matcher.StringMatcher }{ { desc: "exact match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(newStringP("abcd.test.com"), nil, nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(newStringP("http://golang"), nil, nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(newStringP("HTTP://GOLANG.ORG"), nil, nil, nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP("abcd.test.com"), nil, nil, nil, nil, false), + matcher.StringMatcherForTesting(newStringP("http://golang"), nil, nil, nil, nil, false), + matcher.StringMatcherForTesting(newStringP("HTTP://GOLANG.ORG"), nil, nil, nil, nil, false), }, }, { desc: "prefix match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, newStringP("i-aint-the-one"), nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, 
newStringP("FOO.BAR"), nil, nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, newStringP("i-aint-the-one"), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, newStringP("FOO.BAR"), nil, nil, nil, false), }, }, { desc: "suffix match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, newStringP("i-aint-the-one"), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, newStringP("1::68"), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, newStringP(".COM"), nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, newStringP("i-aint-the-one"), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP("1::68"), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP(".COM"), nil, nil, false), }, }, { desc: "regex match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`.*\.examples\.com`), false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`.*\.examples\.com`), false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), }, }, { desc: "contains match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("i-aint-the-one"), nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("2001:db8:1:1::68"), nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("GRPC"), nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, nil, 
newStringP("i-aint-the-one"), nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("2001:db8:1:1::68"), nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("GRPC"), nil, false), }, }, } @@ -216,73 +216,73 @@ func TestMatchingSANExists_Success(t *testing.T) { tests := []struct { desc string - sanMatchers []xdsinternal.StringMatcher + sanMatchers []matcher.StringMatcher }{ { desc: "no san matchers", }, { desc: "exact match dns wildcard", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(newStringP("https://github.com/grpc/grpc-java"), nil, nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(newStringP("abc.example.com"), nil, nil, nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), + matcher.StringMatcherForTesting(newStringP("https://github.com/grpc/grpc-java"), nil, nil, nil, nil, false), + matcher.StringMatcherForTesting(newStringP("abc.example.com"), nil, nil, nil, nil, false), }, }, { desc: "exact match ignore case", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(newStringP("FOOBAR@EXAMPLE.COM"), nil, nil, nil, nil, true), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP("FOOBAR@EXAMPLE.COM"), nil, nil, nil, nil, true), }, }, { desc: "prefix match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, newStringP(".co.in"), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, newStringP("baz.test"), nil, nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, newStringP(".co.in"), nil, nil, false), + 
matcher.StringMatcherForTesting(nil, newStringP("192.168.1.1"), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, newStringP("baz.test"), nil, nil, nil, false), }, }, { desc: "prefix match ignore case", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, newStringP("BAZ.test"), nil, nil, nil, true), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, newStringP("BAZ.test"), nil, nil, nil, true), }, }, { desc: "suffix match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), - xdsinternal.StringMatcherForTesting(nil, nil, newStringP("192.168.1.1"), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, newStringP("@test.com"), nil, nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), + matcher.StringMatcherForTesting(nil, nil, newStringP("192.168.1.1"), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP("@test.com"), nil, nil, false), }, }, { desc: "suffix match ignore case", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, newStringP("@test.COM"), nil, nil, true), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, newStringP("@test.COM"), nil, nil, true), }, }, { desc: "regex match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("https://github.com/grpc/grpc-java"), nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`.*\.test\.com`), false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, nil, 
newStringP("https://github.com/grpc/grpc-java"), nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`192\.[0-9]{1,3}\.1\.1`), false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(`.*\.test\.com`), false), }, }, { desc: "contains match", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(newStringP("https://github.com/grpc/grpc-java"), nil, nil, nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("2001:68::db8"), nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("192.0.0"), nil, false), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP("https://github.com/grpc/grpc-java"), nil, nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("2001:68::db8"), nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("192.0.0"), nil, false), }, }, { desc: "contains match ignore case", - sanMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP("GRPC"), nil, true), + sanMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(nil, nil, nil, newStringP("GRPC"), nil, true), }, }, } diff --git a/internal/envconfig/envconfig.go b/internal/envconfig/envconfig.go index 73931a94bcad..77c2c0b89f6c 100644 --- a/internal/envconfig/envconfig.go +++ b/internal/envconfig/envconfig.go @@ -21,18 +21,49 @@ package envconfig import ( "os" + "strconv" "strings" ) -const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" -) - var ( - // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". - Retry = strings.EqualFold(os.Getenv(retryStr), "on") // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). 
- TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy, which can be enabled by setting the environment + // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". 
+ return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/internal/envconfig/envconfig_test.go b/internal/envconfig/envconfig_test.go new file mode 100644 index 000000000000..68fdf6c73a7f --- /dev/null +++ b/internal/envconfig/envconfig_test.go @@ -0,0 +1,103 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package envconfig + +import ( + "os" + "testing" + + "google.golang.org/grpc/internal/grpctest" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestUint64FromEnv(t *testing.T) { + var testCases = []struct { + name string + val string + def, min, max uint64 + want uint64 + }{ + { + name: "error parsing", + val: "asdf", def: 5, want: 5, + }, { + name: "unset", + val: "", def: 5, want: 5, + }, { + name: "too low", + val: "5", min: 10, want: 10, + }, { + name: "too high", + val: "5", max: 2, want: 2, + }, { + name: "in range", + val: "17391", def: 13000, min: 12000, max: 18000, want: 17391, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + const testVar = "testvar" + if tc.val == "" { + os.Unsetenv(testVar) + } else { + os.Setenv(testVar, tc.val) + } + if got := uint64FromEnv(testVar, tc.def, tc.min, tc.max); got != tc.want { + t.Errorf("uint64FromEnv(%q(=%q), %v, %v, %v) = %v; want %v", testVar, tc.val, tc.def, tc.min, tc.max, got, tc.want) + } + }) + } +} + +func (s) TestBoolFromEnv(t *testing.T) { + var testCases = []struct { + val string + def bool + want bool + }{ + {val: "", def: true, want: true}, + {val: "", def: false, want: false}, + {val: "true", def: true, want: true}, + {val: "true", def: false, want: true}, + {val: "false", def: true, want: false}, + {val: "false", def: false, want: false}, + {val: "asdf", def: true, want: true}, + {val: "asdf", def: false, want: false}, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + const testVar = "testvar" + if tc.val == "" { + os.Unsetenv(testVar) + } else { + os.Setenv(testVar, tc.val) + } + if got := boolFromEnv(testVar, tc.def); got != tc.want { + t.Errorf("boolFromEnv(%q(=%q), %v) = %v; want %v", testVar, tc.val, tc.def, got, tc.want) + } + }) + } +} diff --git a/internal/envconfig/observability.go b/internal/envconfig/observability.go new file mode 100644 index 
000000000000..dd314cfb18f4 --- /dev/null +++ b/internal/envconfig/observability.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/internal/envconfig/xds.go b/internal/envconfig/xds.go new file mode 100644 index 000000000000..02b4b6a1c109 --- /dev/null +++ b/internal/envconfig/xds.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. 
+ XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + // XDSRingHash indicates whether ring hash support is enabled, which can be + disabled by setting the environment variable + "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". + XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) + // XDSClientSideSecurity is used to control processing of security + configuration on the client-side. + // + // Note that there is no env var protection for the server-side because we + have a brand new API on the server-side and users explicitly need to use + the new API to get security integration on the server. + XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) + // XDSAggregateAndDNS indicates whether processing of aggregated cluster and + DNS cluster is enabled, which can be disabled by setting the environment + variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + to "false". + XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) + + // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, + which can be disabled by setting the environment variable + "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". + XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) + // XDSOutlierDetection indicates whether outlier detection support is + enabled, which can be disabled by setting the environment variable + "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". + XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) + // XDSFederation indicates whether federation support is enabled, which can + // be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "false". 
+ XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) + + // XDSRLS indicates whether processing of Cluster Specifier plugins and + support for the RLS Cluster Specifier is enabled, which can be disabled by + setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + "false". + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which + can be disabled by setting the environment variable + "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". + XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) +) diff --git a/internal/googlecloud/googlecloud.go b/internal/googlecloud/googlecloud.go index d6c9e03fc4c8..6717b757f80d 100644 --- a/internal/googlecloud/googlecloud.go +++ b/internal/googlecloud/googlecloud.go @@ -20,13 +20,6 @@ package googlecloud import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "regexp" "runtime" "strings" "sync" @@ -35,43 +28,9 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const ( - linuxProductNameFile = "/sys/class/dmi/id/product_name" - windowsCheckCommand = "powershell.exe" - windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" - powershellOutputFilter = "Manufacturer" - windowsManufacturerRegex = ":(.*)" - - logPrefix = "[googlecloud]" -) +const logPrefix = "[googlecloud]" var ( - // The following two variables will be reassigned in tests. 
- runningOS = runtime.GOOS - manufacturerReader = func() (io.Reader, error) { - switch runningOS { - case "linux": - return os.Open(linuxProductNameFile) - case "windows": - cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) - out, err := cmd.Output() - if err != nil { - return nil, err - } - for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { - if strings.HasPrefix(line, powershellOutputFilter) { - re := regexp.MustCompile(windowsManufacturerRegex) - name := re.FindString(line) - name = strings.TrimLeft(name, ":") - return strings.NewReader(name), nil - } - } - return nil, errors.New("cannot determine the machine's manufacturer") - default: - return nil, fmt.Errorf("%s is not supported", runningOS) - } - } - vmOnGCEOnce sync.Once vmOnGCE bool @@ -84,21 +43,21 @@ var ( // package. We keep this to avoid depending on the cloud library module. func OnGCE() bool { vmOnGCEOnce.Do(func() { - vmOnGCE = isRunningOnGCE() + mf, err := manufacturer() + if err != nil { + logger.Infof("failed to read manufacturer, setting onGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) }) return vmOnGCE } -// isRunningOnGCE checks whether the local system, without doing a network request is +// isRunningOnGCE checks whether the local system, without doing a network request, is // running on GCP. 
-func isRunningOnGCE() bool { - manufacturer, err := readManufacturer() - if err != nil { - logger.Infof("failed to read manufacturer %v, returning OnGCE=false", err) - return false - } +func isRunningOnGCE(manufacturer []byte, goos string) bool { name := string(manufacturer) - switch runningOS { + switch goos { case "linux": name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" @@ -111,18 +70,3 @@ func isRunningOnGCE() bool { return false } } - -func readManufacturer() ([]byte, error) { - reader, err := manufacturerReader() - if err != nil { - return nil, err - } - if reader == nil { - return nil, errors.New("got nil reader") - } - manufacturer, err := ioutil.ReadAll(reader) - if err != nil { - return nil, fmt.Errorf("failed reading %v: %v", linuxProductNameFile, err) - } - return manufacturer, nil -} diff --git a/internal/googlecloud/googlecloud_test.go b/internal/googlecloud/googlecloud_test.go index bd5a42ffab97..69ab2fd4c5f2 100644 --- a/internal/googlecloud/googlecloud_test.go +++ b/internal/googlecloud/googlecloud_test.go @@ -19,68 +19,28 @@ package googlecloud import ( - "io" - "os" - "strings" "testing" ) -func setupManufacturerReader(testOS string, reader func() (io.Reader, error)) func() { - tmpOS := runningOS - tmpReader := manufacturerReader - - // Set test OS and reader function. 
- runningOS = testOS - manufacturerReader = reader - return func() { - runningOS = tmpOS - manufacturerReader = tmpReader - } -} - -func setup(testOS string, testReader io.Reader) func() { - reader := func() (io.Reader, error) { - return testReader, nil - } - return setupManufacturerReader(testOS, reader) -} - -func setupError(testOS string, err error) func() { - reader := func() (io.Reader, error) { - return nil, err - } - return setupManufacturerReader(testOS, reader) -} - func TestIsRunningOnGCE(t *testing.T) { for _, tc := range []struct { - description string - testOS string - testReader io.Reader - out bool + description string + testOS string + testManufacturer string + out bool }{ // Linux tests. - {"linux: not a GCP platform", "linux", strings.NewReader("not GCP"), false}, - {"Linux: GCP platform (Google)", "linux", strings.NewReader("Google"), true}, - {"Linux: GCP platform (Google Compute Engine)", "linux", strings.NewReader("Google Compute Engine"), true}, - {"Linux: GCP platform (Google Compute Engine) with extra spaces", "linux", strings.NewReader(" Google Compute Engine "), true}, + {"linux: not a GCP platform", "linux", "not GCP", false}, + {"Linux: GCP platform (Google)", "linux", "Google", true}, + {"Linux: GCP platform (Google Compute Engine)", "linux", "Google Compute Engine", true}, + {"Linux: GCP platform (Google Compute Engine) with extra spaces", "linux", " Google Compute Engine ", true}, // Windows tests. 
- {"windows: not a GCP platform", "windows", strings.NewReader("not GCP"), false}, - {"windows: GCP platform (Google)", "windows", strings.NewReader("Google"), true}, - {"windows: GCP platform (Google) with extra spaces", "windows", strings.NewReader(" Google "), true}, + {"windows: not a GCP platform", "windows", "not GCP", false}, + {"windows: GCP platform (Google)", "windows", "Google", true}, + {"windows: GCP platform (Google) with extra spaces", "windows", " Google ", true}, } { - reverseFunc := setup(tc.testOS, tc.testReader) - if got, want := isRunningOnGCE(), tc.out; got != want { + if got, want := isRunningOnGCE([]byte(tc.testManufacturer), tc.testOS), tc.out; got != want { t.Errorf("%v: isRunningOnGCE()=%v, want %v", tc.description, got, want) } - reverseFunc() - } -} - -func TestIsRunningOnGCENoProductNameFile(t *testing.T) { - reverseFunc := setupError("linux", os.ErrNotExist) - if isRunningOnGCE() { - t.Errorf("ErrNotExist: isRunningOnGCE()=true, want false") } - reverseFunc() } diff --git a/xds/go113.go b/internal/googlecloud/manufacturer.go similarity index 75% rename from xds/go113.go rename to internal/googlecloud/manufacturer.go index 40f82cde5c1e..ffa0f1ddee5d 100644 --- a/xds/go113.go +++ b/internal/googlecloud/manufacturer.go @@ -1,8 +1,9 @@ -// +build go1.13 +//go:build !(linux || windows) +// +build !linux,!windows /* * - * Copyright 2020 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,8 +19,8 @@ * */ -package xds +package googlecloud -import ( - _ "google.golang.org/grpc/credentials/tls/certprovider/meshca" // Register the MeshCA certificate provider plugin. 
-) +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/internal/googlecloud/manufacturer_linux.go b/internal/googlecloud/manufacturer_linux.go new file mode 100644 index 000000000000..6e455fb0a822 --- /dev/null +++ b/internal/googlecloud/manufacturer_linux.go @@ -0,0 +1,27 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package googlecloud + +import "os" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return os.ReadFile(linuxProductNameFile) +} diff --git a/internal/googlecloud/manufacturer_windows.go b/internal/googlecloud/manufacturer_windows.go new file mode 100644 index 000000000000..2d7aaaaa70fe --- /dev/null +++ b/internal/googlecloud/manufacturer_windows.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package googlecloud + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/internal/grpclog/grpclog.go b/internal/grpclog/grpclog.go index e6f975cbf6a8..b68e26a36493 100644 --- a/internal/grpclog/grpclog.go +++ b/internal/grpclog/grpclog.go @@ -110,17 +110,17 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. 
+ // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/internal/grpclog/prefixLogger.go b/internal/grpclog/prefixLogger.go index 82af70e96f15..02224b42ca86 100644 --- a/internal/grpclog/prefixLogger.go +++ b/internal/grpclog/prefixLogger.go @@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { // Debugf does info logging at verbose level 2. func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. if !Logger.V(2) { return } @@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { return } InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. diff --git a/internal/grpcrand/grpcrand.go b/internal/grpcrand/grpcrand.go index 200b115ca209..aa97273e7d13 100644 --- a/internal/grpcrand/grpcrand.go +++ b/internal/grpcrand/grpcrand.go @@ -31,26 +31,65 @@ var ( mu sync.Mutex ) +// Int implements rand.Int on the grpcrand global source. 
+func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + // Int63n implements rand.Int63n on the grpcrand global source. func Int63n(n int64) int64 { mu.Lock() - res := r.Int63n(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Int63n(n) } // Intn implements rand.Intn on the grpcrand global source. func Intn(n int) int { mu.Lock() - res := r.Intn(n) - mu.Unlock() - return res + defer mu.Unlock() + return r.Intn(n) +} + +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) } // Float64 implements rand.Float64 on the grpcrand global source. func Float64() float64 { mu.Lock() - res := r.Float64() - mu.Unlock() - return res + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() +} + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) } diff --git a/internal/grpcsync/callback_serializer.go b/internal/grpcsync/callback_serializer.go new file mode 100644 index 000000000000..37b8d4117e77 --- /dev/null +++ b/internal/grpcsync/callback_serializer.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // Done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + Done chan struct{} + + callbacks *buffer.Unbounded + closedMu sync.Mutex + closed bool +} + +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. Users should cancel the +// provided context to shutdown the CallbackSerializer. It is guaranteed that no +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + t := &CallbackSerializer{ + Done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go t.run(ctx) + return t +} + +// Schedule adds a callback to be scheduled after existing callbacks are run. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. 
+// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + t.closedMu.Lock() + defer t.closedMu.Unlock() + + if t.closed { + return false + } + t.callbacks.Put(f) + return true +} + +func (t *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + + defer close(t.Done) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case callback, ok := <-t.callbacks.Get(): + if !ok { + return + } + t.callbacks.Load() + callback.(func(ctx context.Context))(ctx) + } + } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing t.Done. + t.closedMu.Lock() + t.closed = true + backlog = t.fetchPendingCallbacks() + t.callbacks.Close() + t.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } +} + +func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { + case b := <-t.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) + t.callbacks.Load() + default: + return backlog + } + } +} diff --git a/internal/grpcsync/callback_serializer_test.go b/internal/grpcsync/callback_serializer_test.go new file mode 100644 index 000000000000..cdbd446f8101 --- /dev/null +++ b/internal/grpcsync/callback_serializer_test.go @@ -0,0 +1,206 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. +) + +// TestCallbackSerializer_Schedule_FIFO verifies that callbacks are executed in +// the same order in which they were scheduled. +func (s) TestCallbackSerializer_Schedule_FIFO(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + cs := NewCallbackSerializer(ctx) + defer cancel() + + // We have two channels, one to record the order of scheduling, and the + // other to record the order of execution. We spawn a bunch of goroutines + // which record the order of scheduling and call the actual Schedule() + // method as well. The callbacks record the order of execution. + // + // We need to grab a lock to record order of scheduling to guarantee that + // the act of recording and the act of calling Schedule() happen atomically. + const numCallbacks = 100 + var mu sync.Mutex + scheduleOrderCh := make(chan int, numCallbacks) + executionOrderCh := make(chan int, numCallbacks) + for i := 0; i < numCallbacks; i++ { + go func(id int) { + mu.Lock() + defer mu.Unlock() + scheduleOrderCh <- id + cs.Schedule(func(ctx context.Context) { + select { + case <-ctx.Done(): + return + case executionOrderCh <- id: + } + }) + }(i) + } + + // Spawn a couple of goroutines to capture the order or scheduling and the + // order of execution. 
+ scheduleOrder := make([]int, numCallbacks) + executionOrder := make([]int, numCallbacks) + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + for i := 0; i < numCallbacks; i++ { + select { + case <-ctx.Done(): + return + case id := <-scheduleOrderCh: + scheduleOrder[i] = id + } + } + }() + go func() { + defer wg.Done() + for i := 0; i < numCallbacks; i++ { + select { + case <-ctx.Done(): + return + case id := <-executionOrderCh: + executionOrder[i] = id + } + } + }() + wg.Wait() + + if diff := cmp.Diff(executionOrder, scheduleOrder); diff != "" { + t.Fatalf("Callbacks are not executed in scheduled order. diff(-want, +got):\n%s", diff) + } +} + +// TestCallbackSerializer_Schedule_Concurrent verifies that all concurrently +// scheduled callbacks get executed. +func (s) TestCallbackSerializer_Schedule_Concurrent(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + cs := NewCallbackSerializer(ctx) + defer cancel() + + // Schedule callbacks concurrently by calling Schedule() from goroutines. + // The execution of the callbacks call Done() on the waitgroup, which + // eventually unblocks the test and allows it to complete. + const numCallbacks = 100 + var wg sync.WaitGroup + wg.Add(numCallbacks) + for i := 0; i < numCallbacks; i++ { + go func() { + cs.Schedule(func(context.Context) { + wg.Done() + }) + }() + } + + // We call Wait() on the waitgroup from a goroutine so that we can select on + // the Wait() being unblocked and the overall test deadline expiring. + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + + select { + case <-ctx.Done(): + t.Fatal("Timeout waiting for all scheduled callbacks to be executed") + case <-done: + } +} + +// TestCallbackSerializer_Schedule_Close verifies that callbacks in the queue +// are not executed once Close() returns. 
+func (s) TestCallbackSerializer_Schedule_Close(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + serializerCtx, serializerCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + cs := NewCallbackSerializer(serializerCtx) + + // Schedule a callback which blocks until the context passed to it is + // canceled. It also closes a channel to signal that it has started. + firstCallbackStartedCh := make(chan struct{}) + cs.Schedule(func(ctx context.Context) { + close(firstCallbackStartedCh) + <-ctx.Done() + }) + + // Schedule a bunch of callbacks. These should be exeuted since the are + // scheduled before the serializer is closed. + const numCallbacks = 10 + callbackCh := make(chan int, numCallbacks) + for i := 0; i < numCallbacks; i++ { + num := i + if !cs.Schedule(func(context.Context) { callbackCh <- num }) { + t.Fatal("Schedule failed to accept a callback when the serializer is yet to be closed") + } + } + + // Ensure that none of the newer callbacks are executed at this point. + select { + case <-time.After(defaultTestShortTimeout): + case <-callbackCh: + t.Fatal("Newer callback executed when older one is still executing") + } + + // Wait for the first callback to start before closing the scheduler. + <-firstCallbackStartedCh + + // Cancel the context which will unblock the first callback. All of the + // other callbacks (which have not started executing at this point) should + // be executed after this. + serializerCancel() + + // Ensure that the newer callbacks are executed. 
+ for i := 0; i < numCallbacks; i++ { + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for callback scheduled before close to be executed") + case num := <-callbackCh: + if num != i { + t.Fatalf("Executing callback %d, want %d", num, i) + } + } + } + <-cs.Done + + done := make(chan struct{}) + if cs.Schedule(func(context.Context) { close(done) }) { + t.Fatal("Scheduled a callback after closing the serializer") + } + + // Ensure that the lates callback is executed at this point. + select { + case <-time.After(defaultTestShortTimeout): + case <-done: + t.Fatal("Newer callback executed when scheduled after closing serializer") + } +} diff --git a/internal/credentials/spiffe_appengine.go b/internal/grpcsync/oncefunc.go similarity index 67% rename from internal/credentials/spiffe_appengine.go rename to internal/grpcsync/oncefunc.go index af6f57719768..6635f7bca96d 100644 --- a/internal/credentials/spiffe_appengine.go +++ b/internal/grpcsync/oncefunc.go @@ -1,8 +1,6 @@ -// +build appengine - /* * - * Copyright 2020 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,14 +16,17 @@ * */ -package credentials +package grpcsync import ( - "crypto/tls" - "net/url" + "sync" ) -// SPIFFEIDFromState is a no-op for appengine builds. -func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { - return nil +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } } diff --git a/internal/grpcsync/oncefunc_test.go b/internal/grpcsync/oncefunc_test.go new file mode 100644 index 000000000000..2b0db8d3eaa3 --- /dev/null +++ b/internal/grpcsync/oncefunc_test.go @@ -0,0 +1,53 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" + "sync/atomic" + "testing" + "time" +) + +// TestOnceFunc tests that a OnceFunc is executed only once even with multiple +// simultaneous callers of it. +func (s) TestOnceFunc(t *testing.T) { + var v int32 + inc := OnceFunc(func() { atomic.AddInt32(&v, 1) }) + + const numWorkers = 100 + var wg sync.WaitGroup // Blocks until all workers have called inc. + wg.Add(numWorkers) + + block := NewEvent() // Signal to worker goroutines to call inc + + for i := 0; i < numWorkers; i++ { + go func() { + <-block.Done() // Wait for a signal. + inc() // Call the OnceFunc. + wg.Done() + }() + } + time.Sleep(time.Millisecond) // Allow goroutines to get to the block. + block.Fire() // Unblock them. + wg.Wait() // Wait for them to complete. + if v != 1 { + t.Fatalf("OnceFunc() called %v times; want 1", v) + } +} diff --git a/internal/grpcsync/pubsub.go b/internal/grpcsync/pubsub.go new file mode 100644 index 000000000000..f58b5ffa6b1e --- /dev/null +++ b/internal/grpcsync/pubsub.go @@ -0,0 +1,136 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg interface{}) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, and +// it is guaranteed that no more subscriber callback will be invoked. +type PubSub struct { + cs *CallbackSerializer + cancel context.CancelFunc + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg interface{} + subscribers map[Subscriber]bool + stopped bool +} + +// NewPubSub returns a new PubSub instance. +func NewPubSub() *PubSub { + ctx, cancel := context.WithCancel(context.Background()) + return &PubSub{ + cs: NewCallbackSerializer(ctx), + cancel: cancel, + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. 
+// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + if ps.stopped { + return func() {} + } + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg interface{}) { + ps.mu.Lock() + defer ps.mu.Unlock() + + if ps.stopped { + return + } + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Stop shuts down the PubSub and releases any resources allocated by it. +// It is guaranteed that no subscriber callbacks would be invoked once this +// method returns. +func (ps *PubSub) Stop() { + ps.mu.Lock() + defer ps.mu.Unlock() + ps.stopped = true + + ps.cancel() +} diff --git a/internal/grpcsync/pubsub_test.go b/internal/grpcsync/pubsub_test.go new file mode 100644 index 000000000000..c610f99b2633 --- /dev/null +++ b/internal/grpcsync/pubsub_test.go @@ -0,0 +1,190 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" + "testing" + "time" +) + +type testSubscriber struct { + onMsgCh chan int +} + +func newTestSubscriber(chSize int) *testSubscriber { + return &testSubscriber{onMsgCh: make(chan int, chSize)} +} + +func (ts *testSubscriber) OnMessage(msg interface{}) { + select { + case ts.onMsgCh <- msg.(int): + default: + } +} + +func (s) TestPubSub_PublishNoMsg(t *testing.T) { + pubsub := NewPubSub() + defer pubsub.Stop() + + ts := newTestSubscriber(1) + pubsub.Subscribe(ts) + + select { + case <-ts.onMsgCh: + t.Fatal("Subscriber callback invoked when no message was published") + case <-time.After(defaultTestShortTimeout): + } +} + +func (s) TestPubSub_PublishMsgs_RegisterSubs_And_Stop(t *testing.T) { + pubsub := NewPubSub() + + const numPublished = 10 + + ts1 := newTestSubscriber(numPublished) + pubsub.Subscribe(ts1) + + var wg sync.WaitGroup + wg.Add(2) + // Publish ten messages on the pubsub and ensure that they are received in order by the subscriber. 
+ go func() { + for i := 0; i < numPublished; i++ { + pubsub.Publish(i) + } + wg.Done() + }() + + go func() { + defer wg.Done() + for i := 0; i < numPublished; i++ { + select { + case m := <-ts1.onMsgCh: + if m != i { + t.Errorf("Received unexpected message: %q; want: %q", m, i) + return + } + case <-time.After(defaultTestTimeout): + t.Error("Timeout when expecting the onMessage() callback to be invoked") + return + } + } + }() + wg.Wait() + if t.Failed() { + t.FailNow() + } + + // Register another subscriber and ensure that it receives the last published message. + ts2 := newTestSubscriber(numPublished) + pubsub.Subscribe(ts2) + + select { + case m := <-ts2.onMsgCh: + if m != numPublished-1 { + t.Fatalf("Received unexpected message: %q; want: %q", m, numPublished-1) + } + case <-time.After(defaultTestShortTimeout): + t.Fatal("Timeout when expecting the onMessage() callback to be invoked") + } + + wg.Add(3) + // Publish ten messages on the pubsub and ensure that they are received in order by the subscribers. 
+ go func() { + for i := 0; i < numPublished; i++ { + pubsub.Publish(i) + } + wg.Done() + }() + go func() { + defer wg.Done() + for i := 0; i < numPublished; i++ { + select { + case m := <-ts1.onMsgCh: + if m != i { + t.Errorf("Received unexpected message: %q; want: %q", m, i) + return + } + case <-time.After(defaultTestTimeout): + t.Error("Timeout when expecting the onMessage() callback to be invoked") + return + } + } + + }() + go func() { + defer wg.Done() + for i := 0; i < numPublished; i++ { + select { + case m := <-ts2.onMsgCh: + if m != i { + t.Errorf("Received unexpected message: %q; want: %q", m, i) + return + } + case <-time.After(defaultTestTimeout): + t.Error("Timeout when expecting the onMessage() callback to be invoked") + return + } + } + }() + wg.Wait() + if t.Failed() { + t.FailNow() + } + + pubsub.Stop() + + go func() { + pubsub.Publish(99) + }() + // Ensure that the subscriber callback is not invoked as instantiated + // pubsub has already closed. + select { + case <-ts1.onMsgCh: + t.Fatal("The callback was invoked after pubsub being stopped") + case <-ts2.onMsgCh: + t.Fatal("The callback was invoked after pubsub being stopped") + case <-time.After(defaultTestShortTimeout): + } +} + +func (s) TestPubSub_PublishMsgs_BeforeRegisterSub(t *testing.T) { + pubsub := NewPubSub() + defer pubsub.Stop() + + const numPublished = 3 + for i := 0; i < numPublished; i++ { + pubsub.Publish(i) + } + + ts := newTestSubscriber(numPublished) + pubsub.Subscribe(ts) + + // Ensure that the subscriber callback is invoked with a previously + // published message. 
+ select { + case d := <-ts.onMsgCh: + if d != numPublished-1 { + t.Fatalf("Unexpected message received: %q; %q", d, numPublished-1) + } + + case <-time.After(defaultTestShortTimeout): + t.Fatal("Timeout when expecting the onMessage() callback to be invoked") + } +} diff --git a/internal/grpctest/grpctest.go b/internal/grpctest/grpctest.go index 53d1c24f4da5..d0a2c533b855 100644 --- a/internal/grpctest/grpctest.go +++ b/internal/grpctest/grpctest.go @@ -79,10 +79,12 @@ func getTestFunc(t *testing.T, xv reflect.Value, name string) func(*testing.T) { // functions, respectively. // // For example usage, see example_test.go. Run it using: -// $ go test -v -run TestExample . +// +// $ go test -v -run TestExample . // // To run a specific test/subtest: -// $ go test -v -run 'TestExample/^Something$' . +// +// $ go test -v -run 'TestExample/^Something$' . func RunSubTests(t *testing.T, x interface{}) { xt := reflect.TypeOf(x) xv := reflect.ValueOf(x) @@ -97,9 +99,13 @@ func RunSubTests(t *testing.T, x interface{}) { } tfunc := getTestFunc(t, xv, methodName) t.Run(strings.TrimPrefix(methodName, "Test"), func(t *testing.T) { + // Run leakcheck in t.Cleanup() to guarantee it is run even if tfunc + // or setup uses t.Fatal(). + // + // Note that a defer would run before t.Cleanup, so if a goroutine + // is closed by a test's t.Cleanup, a deferred leakcheck would fail. 
+ t.Cleanup(func() { teardown(t) }) setup(t) - // defer teardown to guarantee it is run even if tfunc uses t.Fatal() - defer teardown(t) tfunc(t) }) } diff --git a/internal/grpctest/tlogger.go b/internal/grpctest/tlogger.go index 95c3598d1d5d..bbb2a2ff4fb0 100644 --- a/internal/grpctest/tlogger.go +++ b/internal/grpctest/tlogger.go @@ -41,19 +41,34 @@ const callingFrame = 4 type logType int +func (l logType) String() string { + switch l { + case infoLog: + return "INFO" + case warningLog: + return "WARNING" + case errorLog: + return "ERROR" + case fatalLog: + return "FATAL" + } + return "UNKNOWN" +} + const ( - logLog logType = iota + infoLog logType = iota + warningLog errorLog fatalLog ) type tLogger struct { v int - t *testing.T - start time.Time initialized bool - m sync.Mutex // protects errors + mu sync.Mutex // guards t, start, and errors + t *testing.T + start time.Time errors map[*regexp.Regexp]int } @@ -76,12 +91,14 @@ func getCallingPrefix(depth int) (string, error) { // log logs the message with the specified parameters to the tLogger. func (g *tLogger) log(ltype logType, depth int, format string, args ...interface{}) { + g.mu.Lock() + defer g.mu.Unlock() prefix, err := getCallingPrefix(callingFrame + depth) if err != nil { g.t.Error(err) return } - args = append([]interface{}{prefix}, args...) + args = append([]interface{}{ltype.String() + " " + prefix}, args...) args = append(args, fmt.Sprintf(" (t=+%s)", time.Since(g.start))) if format == "" { @@ -119,14 +136,14 @@ func (g *tLogger) log(ltype logType, depth int, format string, args ...interface // Update updates the testing.T that the testing logger logs to. Should be done // before every test. It also initializes the tLogger if it has not already. 
func (g *tLogger) Update(t *testing.T) { + g.mu.Lock() + defer g.mu.Unlock() if !g.initialized { grpclog.SetLoggerV2(TLogger) g.initialized = true } g.t = t g.start = time.Now() - g.m.Lock() - defer g.m.Unlock() g.errors = map[*regexp.Regexp]int{} } @@ -141,20 +158,20 @@ func (g *tLogger) ExpectError(expr string) { // ExpectErrorN declares an error to be expected n times. func (g *tLogger) ExpectErrorN(expr string, n int) { + g.mu.Lock() + defer g.mu.Unlock() re, err := regexp.Compile(expr) if err != nil { g.t.Error(err) return } - g.m.Lock() - defer g.m.Unlock() g.errors[re] += n } // EndTest checks if expected errors were not encountered. func (g *tLogger) EndTest(t *testing.T) { - g.m.Lock() - defer g.m.Unlock() + g.mu.Lock() + defer g.mu.Unlock() for re, count := range g.errors { if count > 0 { t.Errorf("Expected error '%v' not encountered", re.String()) @@ -165,8 +182,6 @@ func (g *tLogger) EndTest(t *testing.T) { // expected determines if the error string is protected or not. func (g *tLogger) expected(s string) bool { - g.m.Lock() - defer g.m.Unlock() for re, count := range g.errors { if re.FindStringIndex(s) != nil { g.errors[re]-- @@ -180,35 +195,35 @@ func (g *tLogger) expected(s string) bool { } func (g *tLogger) Info(args ...interface{}) { - g.log(logLog, 0, "", args...) + g.log(infoLog, 0, "", args...) } func (g *tLogger) Infoln(args ...interface{}) { - g.log(logLog, 0, "", args...) + g.log(infoLog, 0, "", args...) } func (g *tLogger) Infof(format string, args ...interface{}) { - g.log(logLog, 0, format, args...) + g.log(infoLog, 0, format, args...) } func (g *tLogger) InfoDepth(depth int, args ...interface{}) { - g.log(logLog, depth, "", args...) + g.log(infoLog, depth, "", args...) } func (g *tLogger) Warning(args ...interface{}) { - g.log(logLog, 0, "", args...) + g.log(warningLog, 0, "", args...) } func (g *tLogger) Warningln(args ...interface{}) { - g.log(logLog, 0, "", args...) + g.log(warningLog, 0, "", args...) 
} func (g *tLogger) Warningf(format string, args ...interface{}) { - g.log(logLog, 0, format, args...) + g.log(warningLog, 0, format, args...) } func (g *tLogger) WarningDepth(depth int, args ...interface{}) { - g.log(logLog, depth, "", args...) + g.log(warningLog, depth, "", args...) } func (g *tLogger) Error(args ...interface{}) { diff --git a/internal/grpcutil/compressor.go b/internal/grpcutil/compressor.go new file mode 100644 index 000000000000..9f4090967980 --- /dev/null +++ b/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. 
+func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/internal/grpcutil/compressor_test.go b/internal/grpcutil/compressor_test.go new file mode 100644 index 000000000000..0d639422a9a0 --- /dev/null +++ b/internal/grpcutil/compressor_test.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "testing" + + "google.golang.org/grpc/internal/envconfig" +) + +func TestRegisteredCompressors(t *testing.T) { + defer func(c []string) { RegisteredCompressorNames = c }(RegisteredCompressorNames) + defer func(v bool) { envconfig.AdvertiseCompressors = v }(envconfig.AdvertiseCompressors) + RegisteredCompressorNames = []string{"gzip", "snappy"} + tests := []struct { + desc string + enabled bool + want string + }{ + {desc: "compressor_ad_disabled", enabled: false, want: ""}, + {desc: "compressor_ad_enabled", enabled: true, want: "gzip,snappy"}, + } + for _, tt := range tests { + envconfig.AdvertiseCompressors = tt.enabled + compressors := RegisteredCompressors() + if compressors != tt.want { + t.Fatalf("Unexpected compressors got:%s, want:%s", compressors, tt.want) + } + } +} diff --git a/internal/grpcutil/grpcutil.go b/internal/grpcutil/grpcutil.go new file mode 100644 index 000000000000..e2f948e8f4f4 --- /dev/null +++ b/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ 
+/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/internal/grpcutil/method.go b/internal/grpcutil/method.go index 4e7475060c1c..ec62b4775e5b 100644 --- a/internal/grpcutil/method.go +++ b/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") @@ -39,6 +38,11 @@ func ParseMethod(methodName string) (service, method string, _ error) { return methodName[:pos], methodName[pos+1:], nil } +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on its own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. const baseContentType = "application/grpc" // ContentSubtype returns the content-subtype for the given content-type. 
The diff --git a/internal/resolver/dns/go113.go b/internal/grpcutil/regex.go similarity index 63% rename from internal/resolver/dns/go113.go rename to internal/grpcutil/regex.go index 8783a8cf8214..7a092b2b8041 100644 --- a/internal/resolver/dns/go113.go +++ b/internal/grpcutil/regex.go @@ -1,8 +1,6 @@ -// +build go1.13 - /* * - * Copyright 2019 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,16 +16,16 @@ * */ -package dns +package grpcutil -import "net" +import "regexp" -func init() { - filterError = func(err error) error { - if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { - // The name does not exist; not an error. - return nil - } - return err +// FullMatchWithRegex returns whether the full text matches the regex provided. +func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) } diff --git a/internal/grpcutil/regex_test.go b/internal/grpcutil/regex_test.go new file mode 100644 index 000000000000..4c12804fed5f --- /dev/null +++ b/internal/grpcutil/regex_test.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpcutil + +import ( + "regexp" + "testing" +) + +func TestFullMatchWithRegex(t *testing.T) { + tests := []struct { + name string + regexStr string + string string + want bool + }{ + { + name: "not match because only partial", + regexStr: "^a+$", + string: "ab", + want: false, + }, + { + name: "match because fully match", + regexStr: "^a+$", + string: "aa", + want: true, + }, + { + name: "longest", + regexStr: "a(|b)", + string: "ab", + want: true, + }, + { + name: "match all", + regexStr: ".*", + string: "", + want: true, + }, + { + name: "matches non-empty strings", + regexStr: ".+", + string: "", + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + hrm := regexp.MustCompile(tt.regexStr) + if got := FullMatchWithRegex(hrm, tt.string); got != tt.want { + t.Errorf("match() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/grpcutil/target.go b/internal/grpcutil/target.go deleted file mode 100644 index 8833021da02e..000000000000 --- a/internal/grpcutil/target.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package grpcutil provides a bunch of utility functions to be used across the -// gRPC codebase. -package grpcutil - -import ( - "strings" - - "google.golang.org/grpc/resolver" -) - -// split2 returns the values from strings.SplitN(s, sep, 2). 
-// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// ParseTarget splits target into a resolver.Target struct containing scheme, -// authority and endpoint. skipUnixColonParsing indicates that the parse should -// not parse "unix:[path]" cases. This should be true in cases where a custom -// dialer is present, to prevent a behavior change. -// -// If target is not a valid scheme://authority/endpoint as specified in -// https://github.com/grpc/grpc/blob/master/doc/naming.md, -// it returns {Endpoint: target}. -func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) { - var ok bool - if strings.HasPrefix(target, "unix-abstract:") { - if strings.HasPrefix(target, "unix-abstract://") { - // Maybe, with Authority specified, try to parse it - var remain string - ret.Scheme, remain, _ = split2(target, "://") - ret.Authority, ret.Endpoint, ok = split2(remain, "/") - if !ok { - // No Authority, add the "//" back - ret.Endpoint = "//" + remain - } else { - // Found Authority, add the "/" back - ret.Endpoint = "/" + ret.Endpoint - } - } else { - // Without Authority specified, split target on ":" - ret.Scheme, ret.Endpoint, _ = split2(target, ":") - } - return ret - } - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing { - // Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases, - // because splitting on :// only handles the - // "unix://[/absolute/path]" case. Only handle if the dialer is nil, - // to avoid a behavior change with custom dialers. 
- return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]} - } - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - if ret.Scheme == "unix" { - // Add the "/" back in the unix case, so the unix resolver receives the - // actual endpoint in the "unix://[/absolute/path]" case. - ret.Endpoint = "/" + ret.Endpoint - } - return ret -} diff --git a/internal/grpcutil/target_test.go b/internal/grpcutil/target_test.go deleted file mode 100644 index f6c586dd0808..000000000000 --- a/internal/grpcutil/target_test.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpcutil - -import ( - "testing" - - "google.golang.org/grpc/resolver" -) - -func TestParseTarget(t *testing.T) { - for _, test := range []resolver.Target{ - {Scheme: "dns", Authority: "", Endpoint: "google.com"}, - {Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com"}, - {Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/?a=b"}, - {Scheme: "passthrough", Authority: "", Endpoint: "/unix/socket/address"}, - } { - str := test.Scheme + "://" + test.Authority + "/" + test.Endpoint - got := ParseTarget(str, false) - if got != test { - t.Errorf("ParseTarget(%q, false) = %+v, want %+v", str, got, test) - } - got = ParseTarget(str, true) - if got != test { - t.Errorf("ParseTarget(%q, true) = %+v, want %+v", str, got, test) - } - } -} - -func TestParseTargetString(t *testing.T) { - for _, test := range []struct { - targetStr string - want resolver.Target - wantWithDialer resolver.Target - }{ - {targetStr: "", want: resolver.Target{Scheme: "", Authority: "", Endpoint: ""}}, - {targetStr: ":///", want: resolver.Target{Scheme: "", Authority: "", Endpoint: ""}}, - {targetStr: "a:///", want: resolver.Target{Scheme: "a", Authority: "", Endpoint: ""}}, - {targetStr: "://a/", want: resolver.Target{Scheme: "", Authority: "a", Endpoint: ""}}, - {targetStr: ":///a", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a"}}, - {targetStr: "a://b/", want: resolver.Target{Scheme: "a", Authority: "b", Endpoint: ""}}, - {targetStr: "a:///b", want: resolver.Target{Scheme: "a", Authority: "", Endpoint: "b"}}, - {targetStr: "://a/b", want: resolver.Target{Scheme: "", Authority: "a", Endpoint: "b"}}, - {targetStr: "a://b/c", want: resolver.Target{Scheme: "a", Authority: "b", Endpoint: "c"}}, - {targetStr: "dns:///google.com", want: resolver.Target{Scheme: "dns", Authority: "", Endpoint: "google.com"}}, - {targetStr: "dns://a.server.com/google.com", want: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com"}}, - 
{targetStr: "dns://a.server.com/google.com/?a=b", want: resolver.Target{Scheme: "dns", Authority: "a.server.com", Endpoint: "google.com/?a=b"}}, - - {targetStr: "/", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "/"}}, - {targetStr: "google.com", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "google.com"}}, - {targetStr: "google.com/?a=b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "google.com/?a=b"}}, - {targetStr: "/unix/socket/address", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "/unix/socket/address"}}, - - // If we can only parse part of the target. - {targetStr: "://", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "://"}}, - {targetStr: "unix://domain", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "unix://domain"}}, - {targetStr: "unix://a/b/c", want: resolver.Target{Scheme: "unix", Authority: "a", Endpoint: "/b/c"}}, - {targetStr: "a:b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a:b"}}, - {targetStr: "a/b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a/b"}}, - {targetStr: "a:/b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a:/b"}}, - {targetStr: "a//b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a//b"}}, - {targetStr: "a://b", want: resolver.Target{Scheme: "", Authority: "", Endpoint: "a://b"}}, - - // Unix cases without custom dialer. - // unix:[local_path], unix:[/absolute], and unix://[/absolute] have different - // behaviors with a custom dialer, to prevent behavior changes with custom dialers. 
- {targetStr: "unix:a/b/c", want: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "a/b/c"}, wantWithDialer: resolver.Target{Scheme: "", Authority: "", Endpoint: "unix:a/b/c"}}, - {targetStr: "unix:/a/b/c", want: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "/a/b/c"}, wantWithDialer: resolver.Target{Scheme: "", Authority: "", Endpoint: "unix:/a/b/c"}}, - {targetStr: "unix:///a/b/c", want: resolver.Target{Scheme: "unix", Authority: "", Endpoint: "/a/b/c"}}, - - {targetStr: "unix-abstract:a/b/c", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a/b/c"}}, - {targetStr: "unix-abstract:a b", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a b"}}, - {targetStr: "unix-abstract:a:b", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a:b"}}, - {targetStr: "unix-abstract:a-b", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "a-b"}}, - {targetStr: "unix-abstract:/ a///://::!@#$%^&*()b", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/ a///://::!@#$%^&*()b"}}, - {targetStr: "unix-abstract:passthrough:abc", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "passthrough:abc"}}, - {targetStr: "unix-abstract:unix:///abc", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "unix:///abc"}}, - {targetStr: "unix-abstract:///a/b/c", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/a/b/c"}}, - {targetStr: "unix-abstract://authority/a/b/c", want: resolver.Target{Scheme: "unix-abstract", Authority: "authority", Endpoint: "/a/b/c"}}, - {targetStr: "unix-abstract:///", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "/"}}, - {targetStr: "unix-abstract://authority", want: resolver.Target{Scheme: "unix-abstract", Authority: "", Endpoint: "//authority"}}, - - {targetStr: "passthrough:///unix:///a/b/c", want: resolver.Target{Scheme: "passthrough", Authority: "", 
Endpoint: "unix:///a/b/c"}}, - } { - got := ParseTarget(test.targetStr, false) - if got != test.want { - t.Errorf("ParseTarget(%q, false) = %+v, want %+v", test.targetStr, got, test.want) - } - wantWithDialer := test.wantWithDialer - if wantWithDialer == (resolver.Target{}) { - wantWithDialer = test.want - } - got = ParseTarget(test.targetStr, true) - if got != wantWithDialer { - t.Errorf("ParseTarget(%q, true) = %+v, want %+v", test.targetStr, got, wantWithDialer) - } - } -} diff --git a/internal/hierarchy/hierarchy.go b/internal/hierarchy/hierarchy.go index a2f990f552e6..884ae22292dc 100644 --- a/internal/hierarchy/hierarchy.go +++ b/internal/hierarchy/hierarchy.go @@ -30,19 +30,37 @@ type pathKeyType string const pathKey = pathKeyType("grpc.internal.address.hierarchical_path") +type pathValue []string + +func (p pathValue) Equal(o interface{}) bool { + op, ok := o.(pathValue) + if !ok { + return false + } + if len(op) != len(p) { + return false + } + for i, v := range p { + if v != op[i] { + return false + } + } + return true +} + // Get returns the hierarchical path of addr. func Get(addr resolver.Address) []string { - attrs := addr.Attributes + attrs := addr.BalancerAttributes if attrs == nil { return nil } - path, _ := attrs.Value(pathKey).([]string) - return path + path, _ := attrs.Value(pathKey).(pathValue) + return ([]string)(path) } // Set overrides the hierarchical path in addr with path. 
func Set(addr resolver.Address, path []string) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(pathKey, path) + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(pathKey, pathValue(path)) return addr } @@ -52,26 +70,29 @@ func Set(addr resolver.Address, path []string) resolver.Address { // // Input: // [ -// {addr0, path: [p0, wt0]} -// {addr1, path: [p0, wt1]} -// {addr2, path: [p1, wt2]} -// {addr3, path: [p1, wt3]} +// +// {addr0, path: [p0, wt0]} +// {addr1, path: [p0, wt1]} +// {addr2, path: [p1, wt2]} +// {addr3, path: [p1, wt3]} +// // ] // // Addresses will be split into p0/p1, and the p0/p1 will be removed from the // path. // // Output: -// { -// p0: [ -// {addr0, path: [wt0]}, -// {addr1, path: [wt1]}, -// ], -// p1: [ -// {addr2, path: [wt2]}, -// {addr3, path: [wt3]}, -// ], -// } +// +// { +// p0: [ +// {addr0, path: [wt0]}, +// {addr1, path: [wt1]}, +// ], +// p1: [ +// {addr2, path: [wt2]}, +// {addr3, path: [wt3]}, +// ], +// } // // If hierarchical path is not set, or has no path in it, the address is // dropped. 
diff --git a/internal/hierarchy/hierarchy_test.go b/internal/hierarchy/hierarchy_test.go index fc62f82b0850..1043d5f81dfa 100644 --- a/internal/hierarchy/hierarchy_test.go +++ b/internal/hierarchy/hierarchy_test.go @@ -40,7 +40,7 @@ func TestGet(t *testing.T) { { name: "set", addr: resolver.Address{ - Attributes: attributes.New(pathKey, []string{"a", "b"}), + BalancerAttributes: attributes.New(pathKey, pathValue{"a", "b"}), }, want: []string{"a", "b"}, }, @@ -68,7 +68,7 @@ func TestSet(t *testing.T) { { name: "before is set", addr: resolver.Address{ - Attributes: attributes.New(pathKey, []string{"before", "a", "b"}), + BalancerAttributes: attributes.New(pathKey, pathValue{"before", "a", "b"}), }, path: []string{"a", "b"}, }, @@ -93,19 +93,19 @@ func TestGroup(t *testing.T) { { name: "all with hierarchy", addrs: []resolver.Address{ - {Addr: "a0", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "b0", Attributes: attributes.New(pathKey, []string{"b"})}, - {Addr: "b1", Attributes: attributes.New(pathKey, []string{"b"})}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "b0", BalancerAttributes: attributes.New(pathKey, pathValue{"b"})}, + {Addr: "b1", BalancerAttributes: attributes.New(pathKey, pathValue{"b"})}, }, want: map[string][]resolver.Address{ "a": { - {Addr: "a0", Attributes: attributes.New(pathKey, []string{})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{})}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{})}, }, "b": { - {Addr: "b0", Attributes: attributes.New(pathKey, []string{})}, - {Addr: "b1", Attributes: attributes.New(pathKey, []string{})}, + {Addr: "b0", BalancerAttributes: attributes.New(pathKey, pathValue{})}, + {Addr: "b1", BalancerAttributes: 
attributes.New(pathKey, pathValue{})}, }, }, }, @@ -113,15 +113,15 @@ func TestGroup(t *testing.T) { // Addresses without hierarchy are ignored. name: "without hierarchy", addrs: []resolver.Address{ - {Addr: "a0", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "b0", Attributes: nil}, - {Addr: "b1", Attributes: nil}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "b0", BalancerAttributes: nil}, + {Addr: "b1", BalancerAttributes: nil}, }, want: map[string][]resolver.Address{ "a": { - {Addr: "a0", Attributes: attributes.New(pathKey, []string{})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{})}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{})}, }, }, }, @@ -130,15 +130,15 @@ func TestGroup(t *testing.T) { // the address is ignored. 
name: "wrong type", addrs: []resolver.Address{ - {Addr: "a0", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{"a"})}, - {Addr: "b0", Attributes: attributes.New(pathKey, "b")}, - {Addr: "b1", Attributes: attributes.New(pathKey, 314)}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{"a"})}, + {Addr: "b0", BalancerAttributes: attributes.New(pathKey, "b")}, + {Addr: "b1", BalancerAttributes: attributes.New(pathKey, 314)}, }, want: map[string][]resolver.Address{ "a": { - {Addr: "a0", Attributes: attributes.New(pathKey, []string{})}, - {Addr: "a1", Attributes: attributes.New(pathKey, []string{})}, + {Addr: "a0", BalancerAttributes: attributes.New(pathKey, pathValue{})}, + {Addr: "a1", BalancerAttributes: attributes.New(pathKey, pathValue{})}, }, }, }, @@ -167,14 +167,14 @@ func TestGroupE2E(t *testing.T) { var addrsWithHierarchy []resolver.Address for p, wts := range hierarchy { - path1 := []string{p} + path1 := pathValue{p} for wt, addrs := range wts { - path2 := append([]string(nil), path1...) + path2 := append(pathValue(nil), path1...) path2 = append(path2, wt) for _, addr := range addrs { a := resolver.Address{ - Addr: addr, - Attributes: attributes.New(pathKey, path2), + Addr: addr, + BalancerAttributes: attributes.New(pathKey, path2), } addrsWithHierarchy = append(addrsWithHierarchy, a) } diff --git a/internal/internal.go b/internal/internal.go index 1e2834c70f67..42ff39c84446 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -38,17 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // NewRequestInfoContext creates a new context based on the argument context attaching - // the passed in RequestInfo to the new context. 
- NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context - // NewClientHandshakeInfoContext returns a copy of the input context with - // the passed in ClientHandshakeInfo struct added to it. - NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool @@ -65,6 +58,112 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString interface{} // func (codes.Code) string + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. 
+ DrainServerTransports interface{} // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions interface{} // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + DisableGlobalDialOptions interface{} // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. 
+ JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. 
+ UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. + // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -87,3 +186,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/internal/leakcheck/leakcheck.go b/internal/leakcheck/leakcheck.go index 1d4fcef994ba..80e43beb6c0e 100644 --- a/internal/leakcheck/leakcheck.go +++ b/internal/leakcheck/leakcheck.go @@ -42,7 +42,12 @@ var goroutinesToIgnore = []string{ "runtime_mcall", "(*loggingT).flushDaemon", "goroutine in C code", - "httputil.DumpRequestOut", // TODO: Remove this once Go1.13 support is removed. https://github.com/golang/go/issues/37669. + // Ignore the http read/write goroutines. gce metadata.OnGCE() was leaking + // these, root cause unknown. 
+ // + // https://github.com/grpc/grpc-go/issues/5171 + // https://github.com/grpc/grpc-go/issues/5173 + "created by net/http.(*Transport).dialConn", } // RegisterIgnoreGoroutine appends s into the ignore goroutine list. The diff --git a/internal/metadata/metadata.go b/internal/metadata/metadata.go index 302262613a02..c82e608e0773 100644 --- a/internal/metadata/metadata.go +++ b/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -30,14 +33,38 @@ type mdKeyType string const mdKey = mdKeyType("grpc.internal.address.metadata") +type mdValue metadata.MD + +func (m mdValue) Equal(o interface{}) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + // Get returns the metadata of addr. func Get(addr resolver.Address) metadata.MD { attrs := addr.Attributes if attrs == nil { return nil } - md, _ := attrs.Value(mdKey).(metadata.MD) - return md + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) } // Set sets (overrides) the metadata in addr. @@ -45,6 +72,61 @@ func Get(addr resolver.Address) metadata.MD { // When a SubConn is created with this address, the RPCs sent on it will all // have this metadata. func Set(addr resolver.Address, md metadata.MD) resolver.Address { - addr.Attributes = addr.Attributes.WithValues(mdKey, md) + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate validates every pair in md with ValidatePair. 
+func Validate(md metadata.MD) error { + for k, vals := range md { + if err := ValidatePair(k, vals...); err != nil { + return err + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} + +// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : +// +// - key must contain one or more characters. +// - the characters in the key must be contained in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. +// - the characters in the every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + // key should not be empty + if key == "" { + return fmt.Errorf("there is an empty key in the header") + } + // pseudo-header will be ignored + if key[0] == ':' { + return nil + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(key); i++ { + r := key[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' 
&& r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) + } + } + if strings.HasSuffix(key, "-bin") { + return nil + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) + } + } + return nil +} diff --git a/internal/metadata/metadata_test.go b/internal/metadata/metadata_test.go index 68c2ca5808c7..8f0e430e5ed4 100644 --- a/internal/metadata/metadata_test.go +++ b/internal/metadata/metadata_test.go @@ -19,6 +19,8 @@ package metadata import ( + "errors" + "reflect" "testing" "github.com/google/go-cmp/cmp" @@ -41,7 +43,7 @@ func TestGet(t *testing.T) { { name: "not set", addr: resolver.Address{ - Attributes: attributes.New(mdKey, metadata.Pairs("k", "v")), + Attributes: attributes.New(mdKey, mdValue(metadata.Pairs("k", "v"))), }, want: metadata.Pairs("k", "v"), }, @@ -69,7 +71,7 @@ func TestSet(t *testing.T) { { name: "set before", addr: resolver.Address{ - Attributes: attributes.New(mdKey, metadata.Pairs("bef", "ore")), + Attributes: attributes.New(mdKey, mdValue(metadata.Pairs("bef", "ore"))), }, md: metadata.Pairs("k", "v"), }, @@ -84,3 +86,32 @@ func TestSet(t *testing.T) { }) } } + +func TestValidate(t *testing.T) { + for _, test := range []struct { + md metadata.MD + want error + }{ + { + md: map[string][]string{string(rune(0x19)): {"testVal"}}, + want: errors.New("header key \"\\x19\" contains illegal characters not in [0-9a-z-_.]"), + }, + { + md: map[string][]string{"test": {string(rune(0x19))}}, + want: errors.New("header key \"test\" contains value with non-printable ASCII characters"), + }, + { + md: map[string][]string{"": {"valid"}}, + want: errors.New("there is an empty key in the header"), + }, + { + md: map[string][]string{"test-bin": {string(rune(0x19))}}, + want: nil, + }, + } { + err := Validate(test.md) + if !reflect.DeepEqual(err, test.want) { + 
t.Errorf("validating metadata which is %v got err :%v, want err :%v", test.md, err, test.want) + } + } +} diff --git a/internal/pretty/pretty.go b/internal/pretty/pretty.go new file mode 100644 index 000000000000..0177af4b5114 --- /dev/null +++ b/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. 
for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/internal/profiling/buffer/buffer.go b/internal/profiling/buffer/buffer.go index 45745cd09197..8bf89c901458 100644 --- a/internal/profiling/buffer/buffer.go +++ b/internal/profiling/buffer/buffer.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2019 gRPC authors. @@ -246,7 +244,7 @@ func (cb *CircularBuffer) Drain() []interface{} { } var wg sync.WaitGroup - wg.Add(int(len(qs))) + wg.Add(len(qs)) for i := 0; i < len(qs); i++ { go func(qi int) { qs[qi].drainWait() diff --git a/internal/profiling/buffer/buffer_appengine.go b/internal/profiling/buffer/buffer_appengine.go deleted file mode 100644 index c92599e5b9c0..000000000000 --- a/internal/profiling/buffer/buffer_appengine.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build appengine - -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package buffer - -// CircularBuffer is a no-op implementation for appengine builds. -// -// Appengine does not support stats because of lack of the support for unsafe -// pointers, which are necessary to efficiently store and retrieve things into -// and from a circular buffer. As a result, Push does not do anything and Drain -// returns an empty slice. -type CircularBuffer struct{} - -// NewCircularBuffer returns a no-op for appengine builds. -func NewCircularBuffer(size uint32) (*CircularBuffer, error) { - return nil, nil -} - -// Push returns a no-op for appengine builds. -func (cb *CircularBuffer) Push(x interface{}) { -} - -// Drain returns a no-op for appengine builds. -func (cb *CircularBuffer) Drain() []interface{} { - return nil -} diff --git a/internal/profiling/buffer/buffer_test.go b/internal/profiling/buffer/buffer_test.go index 86bd77d4a2e6..a7f3b61e4afa 100644 --- a/internal/profiling/buffer/buffer_test.go +++ b/internal/profiling/buffer/buffer_test.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2019 gRPC authors. diff --git a/internal/profiling/goid_modified.go b/internal/profiling/goid_modified.go index b186499cd0d1..f2bae99b2a95 100644 --- a/internal/profiling/goid_modified.go +++ b/internal/profiling/goid_modified.go @@ -1,3 +1,4 @@ +//go:build grpcgoid // +build grpcgoid /* @@ -32,48 +33,48 @@ import ( // // Several other approaches were considered before arriving at this: // -// 1. Using a CGO module: CGO usually has access to some things that regular -// Go does not. Till go1.4, CGO used to have access to the goroutine struct -// because the Go runtime was written in C. However, 1.5+ uses a native Go -// runtime; as a result, CGO does not have access to the goroutine structure -// anymore in modern Go. Besides, CGO interop wasn't fast enough (estimated -// to be ~170ns/op). This would also make building grpc require a C -// compiler, which isn't a requirement currently, breaking a lot of stuff. +// 1. 
Using a CGO module: CGO usually has access to some things that regular +// Go does not. Till go1.4, CGO used to have access to the goroutine struct +// because the Go runtime was written in C. However, 1.5+ uses a native Go +// runtime; as a result, CGO does not have access to the goroutine structure +// anymore in modern Go. Besides, CGO interop wasn't fast enough (estimated +// to be ~170ns/op). This would also make building grpc require a C +// compiler, which isn't a requirement currently, breaking a lot of stuff. // -// 2. Using runtime.Stack stacktrace: While this would remove the need for a -// modified Go runtime, this is ridiculously slow, thanks to the all the -// string processing shenanigans required to extract the goroutine ID (about -// ~2000ns/op). +// 2. Using runtime.Stack stacktrace: While this would remove the need for a +// modified Go runtime, this is ridiculously slow, thanks to the all the +// string processing shenanigans required to extract the goroutine ID (about +// ~2000ns/op). // -// 3. Using Go version-specific build tags: For any given Go version, the -// goroutine struct has a fixed structure. As a result, the goroutine ID -// could be extracted if we know the offset using some assembly. This would -// be faster then #1 and #2, but is harder to maintain. This would require -// special Go code that's both architecture-specific and go version-specific -// (a quadratic number of variants to maintain). +// 3. Using Go version-specific build tags: For any given Go version, the +// goroutine struct has a fixed structure. As a result, the goroutine ID +// could be extracted if we know the offset using some assembly. This would +// be faster then #1 and #2, but is harder to maintain. This would require +// special Go code that's both architecture-specific and go version-specific +// (a quadratic number of variants to maintain). // -// 4. 
This approach, which requires a simple modification [1] to the Go runtime -// to expose the current goroutine's ID. This is the chosen approach and it -// takes about ~2 ns/op, which is negligible in the face of the tens of -// microseconds that grpc takes to complete a RPC request. +// 4. This approach, which requires a simple modification [1] to the Go runtime +// to expose the current goroutine's ID. This is the chosen approach and it +// takes about ~2 ns/op, which is negligible in the face of the tens of +// microseconds that grpc takes to complete a RPC request. // // [1] To make the goroutine ID visible to Go programs apply the following // change to the runtime2.go file in your Go runtime installation: // -// diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go -// --- a/src/runtime/runtime2.go -// +++ b/src/runtime/runtime2.go -// @@ -392,6 +392,10 @@ type stack struct { -// hi uintptr -// } +// diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go +// --- a/src/runtime/runtime2.go +// +++ b/src/runtime/runtime2.go +// @@ -392,6 +392,10 @@ type stack struct { +// hi uintptr +// } // -// +func Goid() int64 { -// + return getg().goid -// +} -// + -// type g struct { -// // Stack parameters. -// // stack describes the actual stack memory: [stack.lo, stack.hi). +// +func Goid() int64 { +// + return getg().goid +// +} +// + +// type g struct { +// // Stack parameters. +// // stack describes the actual stack memory: [stack.lo, stack.hi). // // The exposed runtime.Goid() function will return a int64 goroutine ID. 
func goid() int64 { diff --git a/internal/profiling/goid_regular.go b/internal/profiling/goid_regular.go index 891c2e98f9db..042933227d81 100644 --- a/internal/profiling/goid_regular.go +++ b/internal/profiling/goid_regular.go @@ -1,3 +1,4 @@ +//go:build !grpcgoid // +build !grpcgoid /* diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go b/internal/proto/grpc_lookup_v1/rls.pb.go similarity index 51% rename from balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go rename to internal/proto/grpc_lookup_v1/rls.pb.go index 7741e6649180..df4cd5484e4e 100644 --- a/balancer/rls/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,30 +34,68 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 +// Possible reasons for making a request. +type RouteLookupRequest_Reason int32 + +const ( + RouteLookupRequest_REASON_UNKNOWN RouteLookupRequest_Reason = 0 // Unused + RouteLookupRequest_REASON_MISS RouteLookupRequest_Reason = 1 // No data available in local cache + RouteLookupRequest_REASON_STALE RouteLookupRequest_Reason = 2 // Data in local cache is stale +) + +// Enum value maps for RouteLookupRequest_Reason. 
+var ( + RouteLookupRequest_Reason_name = map[int32]string{ + 0: "REASON_UNKNOWN", + 1: "REASON_MISS", + 2: "REASON_STALE", + } + RouteLookupRequest_Reason_value = map[string]int32{ + "REASON_UNKNOWN": 0, + "REASON_MISS": 1, + "REASON_STALE": 2, + } +) + +func (x RouteLookupRequest_Reason) Enum() *RouteLookupRequest_Reason { + p := new(RouteLookupRequest_Reason) + *p = x + return p +} + +func (x RouteLookupRequest_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RouteLookupRequest_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_lookup_v1_rls_proto_enumTypes[0].Descriptor() +} + +func (RouteLookupRequest_Reason) Type() protoreflect.EnumType { + return &file_grpc_lookup_v1_rls_proto_enumTypes[0] +} + +func (x RouteLookupRequest_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RouteLookupRequest_Reason.Descriptor instead. +func (RouteLookupRequest_Reason) EnumDescriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{0, 0} +} type RouteLookupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Full host name of the target server, e.g. firestore.googleapis.com. - // Only set for gRPC requests; HTTP requests must use key_map explicitly. - // Deprecated in favor of setting key_map keys with GrpcKeyBuilder.extra_keys. - // - // Deprecated: Do not use. - Server string `protobuf:"bytes,1,opt,name=server,proto3" json:"server,omitempty"` - // Full path of the request, i.e. "/service/method". - // Only set for gRPC requests; HTTP requests must use key_map explicitly. - // Deprecated in favor of setting key_map keys with GrpcKeyBuilder.extra_keys. - // - // Deprecated: Do not use. 
- Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` // Target type allows the client to specify what kind of target format it // would like from RLS to allow it to find the regional server, e.g. "grpc". TargetType string `protobuf:"bytes,3,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"` + // Reason for making this request. + Reason RouteLookupRequest_Reason `protobuf:"varint,5,opt,name=reason,proto3,enum=grpc.lookup.v1.RouteLookupRequest_Reason" json:"reason,omitempty"` + // For REASON_STALE, the header_data from the stale response, if any. + StaleHeaderData string `protobuf:"bytes,6,opt,name=stale_header_data,json=staleHeaderData,proto3" json:"stale_header_data,omitempty"` // Map of key values extracted via key builders for the gRPC or HTTP request. KeyMap map[string]string `protobuf:"bytes,4,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -95,25 +132,23 @@ func (*RouteLookupRequest) Descriptor() ([]byte, []int) { return file_grpc_lookup_v1_rls_proto_rawDescGZIP(), []int{0} } -// Deprecated: Do not use. -func (x *RouteLookupRequest) GetServer() string { +func (x *RouteLookupRequest) GetTargetType() string { if x != nil { - return x.Server + return x.TargetType } return "" } -// Deprecated: Do not use. 
-func (x *RouteLookupRequest) GetPath() string { +func (x *RouteLookupRequest) GetReason() RouteLookupRequest_Reason { if x != nil { - return x.Path + return x.Reason } - return "" + return RouteLookupRequest_REASON_UNKNOWN } -func (x *RouteLookupRequest) GetTargetType() string { +func (x *RouteLookupRequest) GetStaleHeaderData() string { if x != nil { - return x.TargetType + return x.StaleHeaderData } return "" } @@ -191,41 +226,50 @@ var File_grpc_lookup_v1_rls_proto protoreflect.FileDescriptor var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{ 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0xed, 0x01, 0x0a, 0x12, 0x52, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0x83, 0x03, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x16, 0x0a, - 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, - 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, - 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, - 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, - 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 
0x61, 0x70, 0x1a, - 0x39, 0x0a, 0x0b, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5e, 0x0a, 0x13, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x4a, 0x04, 0x08, 0x01, - 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, - 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, - 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, - 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, - 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, - 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, - 0x72, 0x70, 0x63, 
0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, 0x65, + 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 
0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, + 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x53, + 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, + 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x22, 0x5e, 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, + 0x74, 0x61, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 
0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -240,21 +284,24 @@ func file_grpc_lookup_v1_rls_proto_rawDescGZIP() []byte { return file_grpc_lookup_v1_rls_proto_rawDescData } +var file_grpc_lookup_v1_rls_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_lookup_v1_rls_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_grpc_lookup_v1_rls_proto_goTypes = []interface{}{ - (*RouteLookupRequest)(nil), // 0: grpc.lookup.v1.RouteLookupRequest - (*RouteLookupResponse)(nil), // 1: grpc.lookup.v1.RouteLookupResponse - nil, // 2: grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + (RouteLookupRequest_Reason)(0), // 0: grpc.lookup.v1.RouteLookupRequest.Reason + (*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest + (*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse + nil, // 3: grpc.lookup.v1.RouteLookupRequest.KeyMapEntry } var file_grpc_lookup_v1_rls_proto_depIdxs = []int32{ - 2, // 0: grpc.lookup.v1.RouteLookupRequest.key_map:type_name -> grpc.lookup.v1.RouteLookupRequest.KeyMapEntry - 0, // 1: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest - 1, // 2: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 0, // 0: grpc.lookup.v1.RouteLookupRequest.reason:type_name -> grpc.lookup.v1.RouteLookupRequest.Reason + 3, // 1: grpc.lookup.v1.RouteLookupRequest.key_map:type_name -> grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + 1, // 2: 
grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest + 2, // 3: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_grpc_lookup_v1_rls_proto_init() } @@ -293,13 +340,14 @@ func file_grpc_lookup_v1_rls_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_lookup_v1_rls_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 3, NumExtensions: 0, NumServices: 1, }, GoTypes: file_grpc_lookup_v1_rls_proto_goTypes, DependencyIndexes: file_grpc_lookup_v1_rls_proto_depIdxs, + EnumInfos: file_grpc_lookup_v1_rls_proto_enumTypes, MessageInfos: file_grpc_lookup_v1_rls_proto_msgTypes, }.Build() File_grpc_lookup_v1_rls_proto = out.File diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls_config.pb.go b/internal/proto/grpc_lookup_v1/rls_config.pb.go similarity index 87% rename from balancer/rls/internal/proto/grpc_lookup_v1/rls_config.pb.go rename to internal/proto/grpc_lookup_v1/rls_config.pb.go index 414b74cdb3b5..317a35a390c4 100644 --- a/balancer/rls/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/lookup/v1/rls_config.proto package grpc_lookup_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Extract a key based on a given name (e.g. header name or query parameter // name). The name must match one of the names listed in the "name" field. If // the "required_match" field is true, one of the specified names must be @@ -204,8 +199,10 @@ func (x *GrpcKeyBuilder) GetConstantKeys() map[string]string { // // For a service where the project id can be expressed either as a subdomain or // in the path, separate HttpKeyBuilders must be used: -// host_pattern: 'example.com' path_pattern: '/{id}/{object}/**' -// host_pattern: '{id}.example.com' path_pattern: '/{object}/**' +// +// host_pattern: 'example.com' path_pattern: '/{id}/{object}/**' +// host_pattern: '{id}.example.com' path_pattern: '/{object}/**' +// // If the host is exactly 'example.com', the first path segment will be used as // the id and the second segment as the object. If the host has a subdomain, the // subdomain will be used as the id and the first segment as the object. If @@ -223,7 +220,7 @@ type HttpKeyBuilder struct { // - "*": Matches any single label. // - "**": Matches zero or more labels (first or last part of host only). // - "{=...}": One or more label capture, where "..." can be any - // template that does not include a capture. + // template that does not include a capture. // - "{}": A single label capture. Identical to {=*}. 
// // Examples: @@ -242,8 +239,9 @@ type HttpKeyBuilder struct { // - "*": Matches any single segment. // - "**": Matches zero or more segments (first or last part of path only). // - "{=...}": One or more segment capture, where "..." can be any - // template that does not include a capture. + // template that does not include a capture. // - "{}": A single segment capture. Identical to {=*}. + // // A custom method may also be specified by appending ":" and the custom // method name or "*" to indicate any custom method (including no custom // method). For example, "/*/projects/{project_id}/**:*" extracts @@ -486,6 +484,56 @@ func (x *RouteLookupConfig) GetDefaultTarget() string { return "" } +// RouteLookupClusterSpecifier is used in xDS to represent a cluster specifier +// plugin for RLS. +type RouteLookupClusterSpecifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The RLS config for this cluster specifier plugin instance. 
+ RouteLookupConfig *RouteLookupConfig `protobuf:"bytes,1,opt,name=route_lookup_config,json=routeLookupConfig,proto3" json:"route_lookup_config,omitempty"` +} + +func (x *RouteLookupClusterSpecifier) Reset() { + *x = RouteLookupClusterSpecifier{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RouteLookupClusterSpecifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RouteLookupClusterSpecifier) ProtoMessage() {} + +func (x *RouteLookupClusterSpecifier) ProtoReflect() protoreflect.Message { + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RouteLookupClusterSpecifier.ProtoReflect.Descriptor instead. +func (*RouteLookupClusterSpecifier) Descriptor() ([]byte, []int) { + return file_grpc_lookup_v1_rls_config_proto_rawDescGZIP(), []int{4} +} + +func (x *RouteLookupClusterSpecifier) GetRouteLookupConfig() *RouteLookupConfig { + if x != nil { + return x.RouteLookupConfig + } + return nil +} + // To match, one of the given Name fields must match; the service and method // fields are specified as fixed strings. The service name is required and // includes the proto package name. 
The method name may be omitted, in @@ -502,7 +550,7 @@ type GrpcKeyBuilder_Name struct { func (x *GrpcKeyBuilder_Name) Reset() { *x = GrpcKeyBuilder_Name{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -515,7 +563,7 @@ func (x *GrpcKeyBuilder_Name) String() string { func (*GrpcKeyBuilder_Name) ProtoMessage() {} func (x *GrpcKeyBuilder_Name) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[4] + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -564,7 +612,7 @@ type GrpcKeyBuilder_ExtraKeys struct { func (x *GrpcKeyBuilder_ExtraKeys) Reset() { *x = GrpcKeyBuilder_ExtraKeys{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -577,7 +625,7 @@ func (x *GrpcKeyBuilder_ExtraKeys) String() string { func (*GrpcKeyBuilder_ExtraKeys) ProtoMessage() {} func (x *GrpcKeyBuilder_ExtraKeys) ProtoReflect() protoreflect.Message { - mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[5] + mi := &file_grpc_lookup_v1_rls_config_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -716,13 +764,20 @@ var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{ 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, - 0x65, 0x73, 
0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x42, - 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, - 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, - 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, - 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, + 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51, + 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, + 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x33, } var ( @@ -737,36 +792,38 @@ func file_grpc_lookup_v1_rls_config_proto_rawDescGZIP() []byte { return file_grpc_lookup_v1_rls_config_proto_rawDescData } -var file_grpc_lookup_v1_rls_config_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_lookup_v1_rls_config_proto_msgTypes = make([]protoimpl.MessageInfo, 9) var file_grpc_lookup_v1_rls_config_proto_goTypes = []interface{}{ - (*NameMatcher)(nil), // 0: grpc.lookup.v1.NameMatcher - (*GrpcKeyBuilder)(nil), // 1: grpc.lookup.v1.GrpcKeyBuilder - (*HttpKeyBuilder)(nil), // 2: grpc.lookup.v1.HttpKeyBuilder - (*RouteLookupConfig)(nil), // 3: grpc.lookup.v1.RouteLookupConfig - (*GrpcKeyBuilder_Name)(nil), // 4: grpc.lookup.v1.GrpcKeyBuilder.Name - (*GrpcKeyBuilder_ExtraKeys)(nil), // 5: grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys - nil, // 6: grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry - nil, // 7: grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry - (*durationpb.Duration)(nil), // 8: google.protobuf.Duration + (*NameMatcher)(nil), // 0: grpc.lookup.v1.NameMatcher + (*GrpcKeyBuilder)(nil), // 1: grpc.lookup.v1.GrpcKeyBuilder + (*HttpKeyBuilder)(nil), // 2: grpc.lookup.v1.HttpKeyBuilder + (*RouteLookupConfig)(nil), // 3: grpc.lookup.v1.RouteLookupConfig + (*RouteLookupClusterSpecifier)(nil), // 4: grpc.lookup.v1.RouteLookupClusterSpecifier + (*GrpcKeyBuilder_Name)(nil), // 5: grpc.lookup.v1.GrpcKeyBuilder.Name + (*GrpcKeyBuilder_ExtraKeys)(nil), // 6: grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys + nil, // 7: grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry + nil, // 8: grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry + (*durationpb.Duration)(nil), // 9: google.protobuf.Duration } var file_grpc_lookup_v1_rls_config_proto_depIdxs = []int32{ - 4, // 0: grpc.lookup.v1.GrpcKeyBuilder.names:type_name -> grpc.lookup.v1.GrpcKeyBuilder.Name - 5, // 1: grpc.lookup.v1.GrpcKeyBuilder.extra_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys + 5, // 0: grpc.lookup.v1.GrpcKeyBuilder.names:type_name -> 
grpc.lookup.v1.GrpcKeyBuilder.Name + 6, // 1: grpc.lookup.v1.GrpcKeyBuilder.extra_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ExtraKeys 0, // 2: grpc.lookup.v1.GrpcKeyBuilder.headers:type_name -> grpc.lookup.v1.NameMatcher - 6, // 3: grpc.lookup.v1.GrpcKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry + 7, // 3: grpc.lookup.v1.GrpcKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.GrpcKeyBuilder.ConstantKeysEntry 0, // 4: grpc.lookup.v1.HttpKeyBuilder.query_parameters:type_name -> grpc.lookup.v1.NameMatcher 0, // 5: grpc.lookup.v1.HttpKeyBuilder.headers:type_name -> grpc.lookup.v1.NameMatcher - 7, // 6: grpc.lookup.v1.HttpKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry + 8, // 6: grpc.lookup.v1.HttpKeyBuilder.constant_keys:type_name -> grpc.lookup.v1.HttpKeyBuilder.ConstantKeysEntry 2, // 7: grpc.lookup.v1.RouteLookupConfig.http_keybuilders:type_name -> grpc.lookup.v1.HttpKeyBuilder 1, // 8: grpc.lookup.v1.RouteLookupConfig.grpc_keybuilders:type_name -> grpc.lookup.v1.GrpcKeyBuilder - 8, // 9: grpc.lookup.v1.RouteLookupConfig.lookup_service_timeout:type_name -> google.protobuf.Duration - 8, // 10: grpc.lookup.v1.RouteLookupConfig.max_age:type_name -> google.protobuf.Duration - 8, // 11: grpc.lookup.v1.RouteLookupConfig.stale_age:type_name -> google.protobuf.Duration - 12, // [12:12] is the sub-list for method output_type - 12, // [12:12] is the sub-list for method input_type - 12, // [12:12] is the sub-list for extension type_name - 12, // [12:12] is the sub-list for extension extendee - 0, // [0:12] is the sub-list for field type_name + 9, // 9: grpc.lookup.v1.RouteLookupConfig.lookup_service_timeout:type_name -> google.protobuf.Duration + 9, // 10: grpc.lookup.v1.RouteLookupConfig.max_age:type_name -> google.protobuf.Duration + 9, // 11: grpc.lookup.v1.RouteLookupConfig.stale_age:type_name -> google.protobuf.Duration + 3, // 12: 
grpc.lookup.v1.RouteLookupClusterSpecifier.route_lookup_config:type_name -> grpc.lookup.v1.RouteLookupConfig + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name } func init() { file_grpc_lookup_v1_rls_config_proto_init() } @@ -824,7 +881,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { } } file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcKeyBuilder_Name); i { + switch v := v.(*RouteLookupClusterSpecifier); i { case 0: return &v.state case 1: @@ -836,6 +893,18 @@ func file_grpc_lookup_v1_rls_config_proto_init() { } } file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcKeyBuilder_Name); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GrpcKeyBuilder_ExtraKeys); i { case 0: return &v.state @@ -854,7 +923,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_lookup_v1_rls_config_proto_rawDesc, NumEnums: 0, - NumMessages: 8, + NumMessages: 9, NumExtensions: 0, NumServices: 0, }, diff --git a/balancer/rls/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go similarity index 79% rename from balancer/rls/internal/proto/grpc_lookup_v1/rls_grpc.pb.go rename to internal/proto/grpc_lookup_v1/rls_grpc.pb.go index b469089ed570..2435fbc9a9b9 100644 --- a/balancer/rls/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -1,4 +1,22 @@ +// Copyright 2020 The 
gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 @@ -14,6 +32,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + RouteLookupService_RouteLookup_FullMethodName = "/grpc.lookup.v1.RouteLookupService/RouteLookup" +) + // RouteLookupServiceClient is the client API for RouteLookupService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -32,7 +54,7 @@ func NewRouteLookupServiceClient(cc grpc.ClientConnInterface) RouteLookupService func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLookupRequest, opts ...grpc.CallOption) (*RouteLookupResponse, error) { out := new(RouteLookupResponse) - err := c.cc.Invoke(ctx, "/grpc.lookup.v1.RouteLookupService/RouteLookup", in, out, opts...) + err := c.cc.Invoke(ctx, RouteLookupService_RouteLookup_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -78,7 +100,7 @@ func _RouteLookupService_RouteLookup_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.lookup.v1.RouteLookupService/RouteLookup", + FullMethod: RouteLookupService_RouteLookup_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(RouteLookupServiceServer).RouteLookup(ctx, req.(*RouteLookupRequest)) diff --git a/internal/proto/grpc_service_config/example_test.go b/internal/proto/grpc_service_config/example_test.go deleted file mode 100644 index b707d8b05e39..000000000000 --- a/internal/proto/grpc_service_config/example_test.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package grpc_service_config_test - -import ( - "testing" - - "github.com/golang/protobuf/jsonpb" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "google.golang.org/grpc/internal/grpctest" - scpb "google.golang.org/grpc/internal/proto/grpc_service_config" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -// TestXdsConfigMarshalToJSON is an example to print json format of xds_config. 
-func (s) TestXdsConfigMarshalToJSON(t *testing.T) { - c := &scpb.XdsConfig{ - ChildPolicy: []*scpb.LoadBalancingConfig{ - {Policy: &scpb.LoadBalancingConfig_Grpclb{ - Grpclb: &scpb.GrpcLbConfig{}, - }}, - {Policy: &scpb.LoadBalancingConfig_RoundRobin{ - RoundRobin: &scpb.RoundRobinConfig{}, - }}, - }, - FallbackPolicy: []*scpb.LoadBalancingConfig{ - {Policy: &scpb.LoadBalancingConfig_Grpclb{ - Grpclb: &scpb.GrpcLbConfig{}, - }}, - {Policy: &scpb.LoadBalancingConfig_PickFirst{ - PickFirst: &scpb.PickFirstConfig{}, - }}, - }, - EdsServiceName: "eds.service.name", - LrsLoadReportingServerName: &wrapperspb.StringValue{ - Value: "lrs.server.name", - }, - } - j, err := (&jsonpb.Marshaler{}).MarshalToString(c) - if err != nil { - t.Fatalf("failed to marshal proto to json: %v", err) - } - t.Logf(j) -} diff --git a/internal/proto/grpc_service_config/service_config.pb.go b/internal/proto/grpc_service_config/service_config.pb.go deleted file mode 100644 index b50d59d79352..000000000000 --- a/internal/proto/grpc_service_config/service_config.pb.go +++ /dev/null @@ -1,2419 +0,0 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// A ServiceConfig is supplied when a service is deployed. It mostly contains -// parameters for how clients that connect to the service should behave (for -// example, the load balancing policy to use to pick between service replicas). 
-// -// The configuration options provided here act as overrides to automatically -// chosen option values. Service owners should be conservative in specifying -// options as the system is likely to choose better values for these options in -// the vast majority of cases. In other words, please specify a configuration -// option only if you really have to, and avoid copy-paste inclusion of configs. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: grpc/service_config/service_config.proto - -package grpc_service_config - -import ( - proto "github.com/golang/protobuf/proto" - code "google.golang.org/genproto/googleapis/rpc/code" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// Load balancing policy. -// -// Note that load_balancing_policy is deprecated in favor of -// load_balancing_config; the former will be used only if the latter -// is unset. -// -// If no LB policy is configured here, then the default is pick_first. -// If the policy name is set via the client API, that value overrides -// the value specified here. 
-// -// If the deprecated load_balancing_policy field is used, note that if the -// resolver returns at least one balancer address (as opposed to backend -// addresses), gRPC will use grpclb (see -// https://github.com/grpc/grpc/blob/master/doc/load-balancing.md), -// regardless of what policy is configured here. However, if the resolver -// returns at least one backend address in addition to the balancer -// address(es), the client may fall back to the requested policy if it -// is unable to reach any of the grpclb load balancers. -type ServiceConfig_LoadBalancingPolicy int32 - -const ( - ServiceConfig_UNSPECIFIED ServiceConfig_LoadBalancingPolicy = 0 - ServiceConfig_ROUND_ROBIN ServiceConfig_LoadBalancingPolicy = 1 -) - -// Enum value maps for ServiceConfig_LoadBalancingPolicy. -var ( - ServiceConfig_LoadBalancingPolicy_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "ROUND_ROBIN", - } - ServiceConfig_LoadBalancingPolicy_value = map[string]int32{ - "UNSPECIFIED": 0, - "ROUND_ROBIN": 1, - } -) - -func (x ServiceConfig_LoadBalancingPolicy) Enum() *ServiceConfig_LoadBalancingPolicy { - p := new(ServiceConfig_LoadBalancingPolicy) - *p = x - return p -} - -func (x ServiceConfig_LoadBalancingPolicy) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ServiceConfig_LoadBalancingPolicy) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_service_config_service_config_proto_enumTypes[0].Descriptor() -} - -func (ServiceConfig_LoadBalancingPolicy) Type() protoreflect.EnumType { - return &file_grpc_service_config_service_config_proto_enumTypes[0] -} - -func (x ServiceConfig_LoadBalancingPolicy) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ServiceConfig_LoadBalancingPolicy.Descriptor instead. 
-func (ServiceConfig_LoadBalancingPolicy) EnumDescriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11, 0} -} - -// Configuration for a method. -type MethodConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name []*MethodConfig_Name `protobuf:"bytes,1,rep,name=name,proto3" json:"name,omitempty"` - // Whether RPCs sent to this method should wait until the connection is - // ready by default. If false, the RPC will abort immediately if there is - // a transient failure connecting to the server. Otherwise, gRPC will - // attempt to connect until the deadline is exceeded. - // - // The value specified via the gRPC client API will override the value - // set here. However, note that setting the value in the client API will - // also affect transient errors encountered during name resolution, which - // cannot be caught by the value here, since the service config is - // obtained by the gRPC client via name resolution. - WaitForReady *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=wait_for_ready,json=waitForReady,proto3" json:"wait_for_ready,omitempty"` - // The default timeout in seconds for RPCs sent to this method. This can be - // overridden in code. If no reply is received in the specified amount of - // time, the request is aborted and a DEADLINE_EXCEEDED error status - // is returned to the caller. - // - // The actual deadline used will be the minimum of the value specified here - // and the value set by the application via the gRPC client API. If either - // one is not set, then the other will be used. If neither is set, then the - // request has no deadline. - Timeout *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"` - // The maximum allowed payload size for an individual request or object in a - // stream (client->server) in bytes. 
The size which is measured is the - // serialized payload after per-message compression (but before stream - // compression) in bytes. This applies both to streaming and non-streaming - // requests. - // - // The actual value used is the minimum of the value specified here and the - // value set by the application via the gRPC client API. If either one is - // not set, then the other will be used. If neither is set, then the - // built-in default is used. - // - // If a client attempts to send an object larger than this value, it will not - // be sent and the client will see a ClientError. - // Note that 0 is a valid value, meaning that the request message - // must be empty. - MaxRequestMessageBytes *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_request_message_bytes,json=maxRequestMessageBytes,proto3" json:"max_request_message_bytes,omitempty"` - // The maximum allowed payload size for an individual response or object in a - // stream (server->client) in bytes. The size which is measured is the - // serialized payload after per-message compression (but before stream - // compression) in bytes. This applies both to streaming and non-streaming - // requests. - // - // The actual value used is the minimum of the value specified here and the - // value set by the application via the gRPC client API. If either one is - // not set, then the other will be used. If neither is set, then the - // built-in default is used. - // - // If a server attempts to send an object larger than this value, it will not - // be sent, and a ServerError will be sent to the client instead. - // Note that 0 is a valid value, meaning that the response message - // must be empty. - MaxResponseMessageBytes *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=max_response_message_bytes,json=maxResponseMessageBytes,proto3" json:"max_response_message_bytes,omitempty"` - // Only one of retry_policy or hedging_policy may be set. If neither is set, - // RPCs will not be retried or hedged. 
- // - // Types that are assignable to RetryOrHedgingPolicy: - // *MethodConfig_RetryPolicy_ - // *MethodConfig_HedgingPolicy_ - RetryOrHedgingPolicy isMethodConfig_RetryOrHedgingPolicy `protobuf_oneof:"retry_or_hedging_policy"` -} - -func (x *MethodConfig) Reset() { - *x = MethodConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodConfig) ProtoMessage() {} - -func (x *MethodConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodConfig.ProtoReflect.Descriptor instead. 
-func (*MethodConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{0} -} - -func (x *MethodConfig) GetName() []*MethodConfig_Name { - if x != nil { - return x.Name - } - return nil -} - -func (x *MethodConfig) GetWaitForReady() *wrapperspb.BoolValue { - if x != nil { - return x.WaitForReady - } - return nil -} - -func (x *MethodConfig) GetTimeout() *durationpb.Duration { - if x != nil { - return x.Timeout - } - return nil -} - -func (x *MethodConfig) GetMaxRequestMessageBytes() *wrapperspb.UInt32Value { - if x != nil { - return x.MaxRequestMessageBytes - } - return nil -} - -func (x *MethodConfig) GetMaxResponseMessageBytes() *wrapperspb.UInt32Value { - if x != nil { - return x.MaxResponseMessageBytes - } - return nil -} - -func (m *MethodConfig) GetRetryOrHedgingPolicy() isMethodConfig_RetryOrHedgingPolicy { - if m != nil { - return m.RetryOrHedgingPolicy - } - return nil -} - -func (x *MethodConfig) GetRetryPolicy() *MethodConfig_RetryPolicy { - if x, ok := x.GetRetryOrHedgingPolicy().(*MethodConfig_RetryPolicy_); ok { - return x.RetryPolicy - } - return nil -} - -func (x *MethodConfig) GetHedgingPolicy() *MethodConfig_HedgingPolicy { - if x, ok := x.GetRetryOrHedgingPolicy().(*MethodConfig_HedgingPolicy_); ok { - return x.HedgingPolicy - } - return nil -} - -type isMethodConfig_RetryOrHedgingPolicy interface { - isMethodConfig_RetryOrHedgingPolicy() -} - -type MethodConfig_RetryPolicy_ struct { - RetryPolicy *MethodConfig_RetryPolicy `protobuf:"bytes,6,opt,name=retry_policy,json=retryPolicy,proto3,oneof"` -} - -type MethodConfig_HedgingPolicy_ struct { - HedgingPolicy *MethodConfig_HedgingPolicy `protobuf:"bytes,7,opt,name=hedging_policy,json=hedgingPolicy,proto3,oneof"` -} - -func (*MethodConfig_RetryPolicy_) isMethodConfig_RetryOrHedgingPolicy() {} - -func (*MethodConfig_HedgingPolicy_) isMethodConfig_RetryOrHedgingPolicy() {} - -// Configuration for pick_first LB policy. 
-type PickFirstConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *PickFirstConfig) Reset() { - *x = PickFirstConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PickFirstConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PickFirstConfig) ProtoMessage() {} - -func (x *PickFirstConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PickFirstConfig.ProtoReflect.Descriptor instead. -func (*PickFirstConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{1} -} - -// Configuration for round_robin LB policy. 
-type RoundRobinConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *RoundRobinConfig) Reset() { - *x = RoundRobinConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RoundRobinConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RoundRobinConfig) ProtoMessage() {} - -func (x *RoundRobinConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RoundRobinConfig.ProtoReflect.Descriptor instead. -func (*RoundRobinConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{2} -} - -// Configuration for priority LB policy. -type PriorityLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Children map[string]*PriorityLoadBalancingPolicyConfig_Child `protobuf:"bytes,1,rep,name=children,proto3" json:"children,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // A list of child names in decreasing priority order - // (i.e., first element is the highest priority). 
- Priorities []string `protobuf:"bytes,2,rep,name=priorities,proto3" json:"priorities,omitempty"` -} - -func (x *PriorityLoadBalancingPolicyConfig) Reset() { - *x = PriorityLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PriorityLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PriorityLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *PriorityLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PriorityLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. -func (*PriorityLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3} -} - -func (x *PriorityLoadBalancingPolicyConfig) GetChildren() map[string]*PriorityLoadBalancingPolicyConfig_Child { - if x != nil { - return x.Children - } - return nil -} - -func (x *PriorityLoadBalancingPolicyConfig) GetPriorities() []string { - if x != nil { - return x.Priorities - } - return nil -} - -// Configuration for weighted_target LB policy. 
-type WeightedTargetLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Targets map[string]*WeightedTargetLoadBalancingPolicyConfig_Target `protobuf:"bytes,1,rep,name=targets,proto3" json:"targets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *WeightedTargetLoadBalancingPolicyConfig) Reset() { - *x = WeightedTargetLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WeightedTargetLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WeightedTargetLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *WeightedTargetLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WeightedTargetLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. -func (*WeightedTargetLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{4} -} - -func (x *WeightedTargetLoadBalancingPolicyConfig) GetTargets() map[string]*WeightedTargetLoadBalancingPolicyConfig_Target { - if x != nil { - return x.Targets - } - return nil -} - -// Configuration for grpclb LB policy. -type GrpcLbConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional. What LB policy to use for routing between the backend - // addresses. If unset, defaults to round_robin. 
- // Currently, the only supported values are round_robin and pick_first. - // Note that this will be used both in balancer mode and in fallback mode. - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. - ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` - // Optional. If specified, overrides the name of the service to be sent to - // the balancer. - ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` -} - -func (x *GrpcLbConfig) Reset() { - *x = GrpcLbConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GrpcLbConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GrpcLbConfig) ProtoMessage() {} - -func (x *GrpcLbConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GrpcLbConfig.ProtoReflect.Descriptor instead. -func (*GrpcLbConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{5} -} - -func (x *GrpcLbConfig) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -func (x *GrpcLbConfig) GetServiceName() string { - if x != nil { - return x.ServiceName - } - return "" -} - -// Configuration for the cds LB policy. 
-type CdsConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` // Required. -} - -func (x *CdsConfig) Reset() { - *x = CdsConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CdsConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CdsConfig) ProtoMessage() {} - -func (x *CdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CdsConfig.ProtoReflect.Descriptor instead. -func (*CdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{6} -} - -func (x *CdsConfig) GetCluster() string { - if x != nil { - return x.Cluster - } - return "" -} - -// Configuration for xds LB policy. -type XdsConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Name of balancer to connect to. - // - // Deprecated: Do not use. - BalancerName string `protobuf:"bytes,1,opt,name=balancer_name,json=balancerName,proto3" json:"balancer_name,omitempty"` - // Optional. What LB policy to use for intra-locality routing. - // If unset, will use whatever algorithm is specified by the balancer. - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. 
- ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,2,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` - // Optional. What LB policy to use in fallback mode. If not - // specified, defaults to round_robin. - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. - FallbackPolicy []*LoadBalancingConfig `protobuf:"bytes,3,rep,name=fallback_policy,json=fallbackPolicy,proto3" json:"fallback_policy,omitempty"` - // Optional. Name to use in EDS query. If not present, defaults to - // the server name from the target URI. - EdsServiceName string `protobuf:"bytes,4,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` - // LRS server to send load reports to. - // If not present, load reporting will be disabled. - // If set to the empty string, load reporting will be sent to the same - // server that we obtained CDS data from. - LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,5,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` -} - -func (x *XdsConfig) Reset() { - *x = XdsConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *XdsConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*XdsConfig) ProtoMessage() {} - -func (x *XdsConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use XdsConfig.ProtoReflect.Descriptor instead. 
-func (*XdsConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{7} -} - -// Deprecated: Do not use. -func (x *XdsConfig) GetBalancerName() string { - if x != nil { - return x.BalancerName - } - return "" -} - -func (x *XdsConfig) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -func (x *XdsConfig) GetFallbackPolicy() []*LoadBalancingConfig { - if x != nil { - return x.FallbackPolicy - } - return nil -} - -func (x *XdsConfig) GetEdsServiceName() string { - if x != nil { - return x.EdsServiceName - } - return "" -} - -func (x *XdsConfig) GetLrsLoadReportingServerName() *wrapperspb.StringValue { - if x != nil { - return x.LrsLoadReportingServerName - } - return nil -} - -// Configuration for eds LB policy. -type EdsLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Cluster name. Required. - Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - // EDS service name, as returned in CDS. - // May be unset if not specified in CDS. - EdsServiceName string `protobuf:"bytes,2,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` - // Server to send load reports to. - // If unset, no load reporting is done. - // If set to empty string, load reporting will be sent to the same - // server as we are getting xds data from. - LrsLoadReportingServerName *wrapperspb.StringValue `protobuf:"bytes,3,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` - // Locality-picking policy. - // This policy's config is expected to be in the format used - // by the weighted_target policy. 
Note that the config should include - // an empty value for the "targets" field; that empty value will be - // replaced by one that is dynamically generated based on the EDS data. - // Optional; defaults to "weighted_target". - LocalityPickingPolicy []*LoadBalancingConfig `protobuf:"bytes,4,rep,name=locality_picking_policy,json=localityPickingPolicy,proto3" json:"locality_picking_policy,omitempty"` - // Endpoint-picking policy. - // This will be configured as the policy for each child in the - // locality-policy's config. - // Optional; defaults to "round_robin". - EndpointPickingPolicy []*LoadBalancingConfig `protobuf:"bytes,5,rep,name=endpoint_picking_policy,json=endpointPickingPolicy,proto3" json:"endpoint_picking_policy,omitempty"` -} - -func (x *EdsLoadBalancingPolicyConfig) Reset() { - *x = EdsLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *EdsLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EdsLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *EdsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EdsLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. 
-func (*EdsLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{8} -} - -func (x *EdsLoadBalancingPolicyConfig) GetCluster() string { - if x != nil { - return x.Cluster - } - return "" -} - -func (x *EdsLoadBalancingPolicyConfig) GetEdsServiceName() string { - if x != nil { - return x.EdsServiceName - } - return "" -} - -func (x *EdsLoadBalancingPolicyConfig) GetLrsLoadReportingServerName() *wrapperspb.StringValue { - if x != nil { - return x.LrsLoadReportingServerName - } - return nil -} - -func (x *EdsLoadBalancingPolicyConfig) GetLocalityPickingPolicy() []*LoadBalancingConfig { - if x != nil { - return x.LocalityPickingPolicy - } - return nil -} - -func (x *EdsLoadBalancingPolicyConfig) GetEndpointPickingPolicy() []*LoadBalancingConfig { - if x != nil { - return x.EndpointPickingPolicy - } - return nil -} - -// Configuration for lrs LB policy. -type LrsLoadBalancingPolicyConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Cluster name. Required. - ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` - // EDS service name, as returned in CDS. - // May be unset if not specified in CDS. - EdsServiceName string `protobuf:"bytes,2,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"` - // Server to send load reports to. Required. - // If set to empty string, load reporting will be sent to the same - // server as we are getting xds data from. - LrsLoadReportingServerName string `protobuf:"bytes,3,opt,name=lrs_load_reporting_server_name,json=lrsLoadReportingServerName,proto3" json:"lrs_load_reporting_server_name,omitempty"` - Locality *LrsLoadBalancingPolicyConfig_Locality `protobuf:"bytes,4,opt,name=locality,proto3" json:"locality,omitempty"` - // Endpoint-picking policy. 
- ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,5,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` -} - -func (x *LrsLoadBalancingPolicyConfig) Reset() { - *x = LrsLoadBalancingPolicyConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LrsLoadBalancingPolicyConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LrsLoadBalancingPolicyConfig) ProtoMessage() {} - -func (x *LrsLoadBalancingPolicyConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LrsLoadBalancingPolicyConfig.ProtoReflect.Descriptor instead. -func (*LrsLoadBalancingPolicyConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9} -} - -func (x *LrsLoadBalancingPolicyConfig) GetClusterName() string { - if x != nil { - return x.ClusterName - } - return "" -} - -func (x *LrsLoadBalancingPolicyConfig) GetEdsServiceName() string { - if x != nil { - return x.EdsServiceName - } - return "" -} - -func (x *LrsLoadBalancingPolicyConfig) GetLrsLoadReportingServerName() string { - if x != nil { - return x.LrsLoadReportingServerName - } - return "" -} - -func (x *LrsLoadBalancingPolicyConfig) GetLocality() *LrsLoadBalancingPolicyConfig_Locality { - if x != nil { - return x.Locality - } - return nil -} - -func (x *LrsLoadBalancingPolicyConfig) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -// Selects LB policy and provides corresponding configuration. 
-// -// In general, all instances of this field should be repeated. Clients will -// iterate through the list in order and stop at the first policy that they -// support. This allows the service config to specify custom policies that may -// not be known to all clients. -// -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. -type LoadBalancingConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Exactly one LB policy may be configured. - // - // Types that are assignable to Policy: - // *LoadBalancingConfig_PickFirst - // *LoadBalancingConfig_RoundRobin - // *LoadBalancingConfig_Grpclb - // *LoadBalancingConfig_Priority - // *LoadBalancingConfig_WeightedTarget - // *LoadBalancingConfig_Cds - // *LoadBalancingConfig_Eds - // *LoadBalancingConfig_Lrs - // *LoadBalancingConfig_Xds - // *LoadBalancingConfig_XdsExperimental - Policy isLoadBalancingConfig_Policy `protobuf_oneof:"policy"` -} - -func (x *LoadBalancingConfig) Reset() { - *x = LoadBalancingConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LoadBalancingConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LoadBalancingConfig) ProtoMessage() {} - -func (x *LoadBalancingConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LoadBalancingConfig.ProtoReflect.Descriptor instead. 
-func (*LoadBalancingConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{10} -} - -func (m *LoadBalancingConfig) GetPolicy() isLoadBalancingConfig_Policy { - if m != nil { - return m.Policy - } - return nil -} - -func (x *LoadBalancingConfig) GetPickFirst() *PickFirstConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_PickFirst); ok { - return x.PickFirst - } - return nil -} - -func (x *LoadBalancingConfig) GetRoundRobin() *RoundRobinConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_RoundRobin); ok { - return x.RoundRobin - } - return nil -} - -func (x *LoadBalancingConfig) GetGrpclb() *GrpcLbConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Grpclb); ok { - return x.Grpclb - } - return nil -} - -func (x *LoadBalancingConfig) GetPriority() *PriorityLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Priority); ok { - return x.Priority - } - return nil -} - -func (x *LoadBalancingConfig) GetWeightedTarget() *WeightedTargetLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_WeightedTarget); ok { - return x.WeightedTarget - } - return nil -} - -func (x *LoadBalancingConfig) GetCds() *CdsConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Cds); ok { - return x.Cds - } - return nil -} - -func (x *LoadBalancingConfig) GetEds() *EdsLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Eds); ok { - return x.Eds - } - return nil -} - -func (x *LoadBalancingConfig) GetLrs() *LrsLoadBalancingPolicyConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Lrs); ok { - return x.Lrs - } - return nil -} - -// Deprecated: Do not use. -func (x *LoadBalancingConfig) GetXds() *XdsConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_Xds); ok { - return x.Xds - } - return nil -} - -// Deprecated: Do not use. 
-func (x *LoadBalancingConfig) GetXdsExperimental() *XdsConfig { - if x, ok := x.GetPolicy().(*LoadBalancingConfig_XdsExperimental); ok { - return x.XdsExperimental - } - return nil -} - -type isLoadBalancingConfig_Policy interface { - isLoadBalancingConfig_Policy() -} - -type LoadBalancingConfig_PickFirst struct { - PickFirst *PickFirstConfig `protobuf:"bytes,4,opt,name=pick_first,proto3,oneof"` -} - -type LoadBalancingConfig_RoundRobin struct { - RoundRobin *RoundRobinConfig `protobuf:"bytes,1,opt,name=round_robin,proto3,oneof"` -} - -type LoadBalancingConfig_Grpclb struct { - // gRPC lookaside load balancing. - // This will eventually be deprecated by the new xDS-based local - // balancing policy. - Grpclb *GrpcLbConfig `protobuf:"bytes,3,opt,name=grpclb,proto3,oneof"` -} - -type LoadBalancingConfig_Priority struct { - Priority *PriorityLoadBalancingPolicyConfig `protobuf:"bytes,9,opt,name=priority,proto3,oneof"` -} - -type LoadBalancingConfig_WeightedTarget struct { - WeightedTarget *WeightedTargetLoadBalancingPolicyConfig `protobuf:"bytes,10,opt,name=weighted_target,json=weightedTarget,proto3,oneof"` -} - -type LoadBalancingConfig_Cds struct { - // EXPERIMENTAL -- DO NOT USE - // xDS-based load balancing. - // The policy is known as xds_experimental while it is under development. - // It will be renamed to xds once it is ready for public use. - Cds *CdsConfig `protobuf:"bytes,6,opt,name=cds,proto3,oneof"` -} - -type LoadBalancingConfig_Eds struct { - Eds *EdsLoadBalancingPolicyConfig `protobuf:"bytes,7,opt,name=eds,proto3,oneof"` -} - -type LoadBalancingConfig_Lrs struct { - Lrs *LrsLoadBalancingPolicyConfig `protobuf:"bytes,8,opt,name=lrs,proto3,oneof"` -} - -type LoadBalancingConfig_Xds struct { - // Deprecated: Do not use. - Xds *XdsConfig `protobuf:"bytes,2,opt,name=xds,proto3,oneof"` -} - -type LoadBalancingConfig_XdsExperimental struct { - // TODO(rekarthik): Deprecate this field after the xds policy - // is ready for public use. 
- // - // Deprecated: Do not use. - XdsExperimental *XdsConfig `protobuf:"bytes,5,opt,name=xds_experimental,proto3,oneof"` -} - -func (*LoadBalancingConfig_PickFirst) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_RoundRobin) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_Grpclb) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_Priority) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_WeightedTarget) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_Cds) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_Eds) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_Lrs) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_Xds) isLoadBalancingConfig_Policy() {} - -func (*LoadBalancingConfig_XdsExperimental) isLoadBalancingConfig_Policy() {} - -// A ServiceConfig represents information about a service but is not specific to -// any name resolver. -type ServiceConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated: Do not use. - LoadBalancingPolicy ServiceConfig_LoadBalancingPolicy `protobuf:"varint,1,opt,name=load_balancing_policy,json=loadBalancingPolicy,proto3,enum=grpc.service_config.ServiceConfig_LoadBalancingPolicy" json:"load_balancing_policy,omitempty"` - // Multiple LB policies can be specified; clients will iterate through - // the list in order and stop at the first policy that they support. If none - // are supported, the service config is considered invalid. - LoadBalancingConfig []*LoadBalancingConfig `protobuf:"bytes,4,rep,name=load_balancing_config,json=loadBalancingConfig,proto3" json:"load_balancing_config,omitempty"` - // Per-method configuration. 
- MethodConfig []*MethodConfig `protobuf:"bytes,2,rep,name=method_config,json=methodConfig,proto3" json:"method_config,omitempty"` - RetryThrottling *ServiceConfig_RetryThrottlingPolicy `protobuf:"bytes,3,opt,name=retry_throttling,json=retryThrottling,proto3" json:"retry_throttling,omitempty"` - HealthCheckConfig *ServiceConfig_HealthCheckConfig `protobuf:"bytes,5,opt,name=health_check_config,json=healthCheckConfig,proto3" json:"health_check_config,omitempty"` -} - -func (x *ServiceConfig) Reset() { - *x = ServiceConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceConfig) ProtoMessage() {} - -func (x *ServiceConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceConfig.ProtoReflect.Descriptor instead. -func (*ServiceConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11} -} - -// Deprecated: Do not use. 
-func (x *ServiceConfig) GetLoadBalancingPolicy() ServiceConfig_LoadBalancingPolicy { - if x != nil { - return x.LoadBalancingPolicy - } - return ServiceConfig_UNSPECIFIED -} - -func (x *ServiceConfig) GetLoadBalancingConfig() []*LoadBalancingConfig { - if x != nil { - return x.LoadBalancingConfig - } - return nil -} - -func (x *ServiceConfig) GetMethodConfig() []*MethodConfig { - if x != nil { - return x.MethodConfig - } - return nil -} - -func (x *ServiceConfig) GetRetryThrottling() *ServiceConfig_RetryThrottlingPolicy { - if x != nil { - return x.RetryThrottling - } - return nil -} - -func (x *ServiceConfig) GetHealthCheckConfig() *ServiceConfig_HealthCheckConfig { - if x != nil { - return x.HealthCheckConfig - } - return nil -} - -// The names of the methods to which this configuration applies. -// - MethodConfig without names (empty list) will be skipped. -// - Each name entry must be unique across the entire ServiceConfig. -// - If the 'method' field is empty, this MethodConfig specifies the defaults -// for all methods for the specified service. -// - If the 'service' field is empty, the 'method' field must be empty, and -// this MethodConfig specifies the default for all methods (it's the default -// config). -// -// When determining which MethodConfig to use for a given RPC, the most -// specific match wins. For example, let's say that the service config -// contains the following MethodConfig entries: -// -// method_config { name { } ... } -// method_config { name { service: "MyService" } ... } -// method_config { name { service: "MyService" method: "Foo" } ... } -// -// MyService/Foo will use the third entry, because it exactly matches the -// service and method name. MyService/Bar will use the second entry, because -// it provides the default for all methods of MyService. AnotherService/Baz -// will use the first entry, because it doesn't match the other two. -// -// In JSON representation, value "", value `null`, and not present are the -// same. 
The following are the same Name: -// - { "service": "s" } -// - { "service": "s", "method": null } -// - { "service": "s", "method": "" } -type MethodConfig_Name struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` // Required. Includes proto package name. - Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` -} - -func (x *MethodConfig_Name) Reset() { - *x = MethodConfig_Name{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodConfig_Name) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodConfig_Name) ProtoMessage() {} - -func (x *MethodConfig_Name) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodConfig_Name.ProtoReflect.Descriptor instead. -func (*MethodConfig_Name) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *MethodConfig_Name) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -func (x *MethodConfig_Name) GetMethod() string { - if x != nil { - return x.Method - } - return "" -} - -// The retry policy for outgoing RPCs. -type MethodConfig_RetryPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The maximum number of RPC attempts, including the original attempt. - // - // This field is required and must be greater than 1. 
- // Any value greater than 5 will be treated as if it were 5. - MaxAttempts uint32 `protobuf:"varint,1,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"` - // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initial_backoff). In general, the nth attempt will occur at - // random(0, - // min(initial_backoff*backoff_multiplier**(n-1), max_backoff)). - // Required. Must be greater than zero. - InitialBackoff *durationpb.Duration `protobuf:"bytes,2,opt,name=initial_backoff,json=initialBackoff,proto3" json:"initial_backoff,omitempty"` - // Required. Must be greater than zero. - MaxBackoff *durationpb.Duration `protobuf:"bytes,3,opt,name=max_backoff,json=maxBackoff,proto3" json:"max_backoff,omitempty"` - BackoffMultiplier float32 `protobuf:"fixed32,4,opt,name=backoff_multiplier,json=backoffMultiplier,proto3" json:"backoff_multiplier,omitempty"` // Required. Must be greater than zero. - // The set of status codes which may be retried. - // - // This field is required and must be non-empty. 
- RetryableStatusCodes []code.Code `protobuf:"varint,5,rep,packed,name=retryable_status_codes,json=retryableStatusCodes,proto3,enum=google.rpc.Code" json:"retryable_status_codes,omitempty"` -} - -func (x *MethodConfig_RetryPolicy) Reset() { - *x = MethodConfig_RetryPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodConfig_RetryPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodConfig_RetryPolicy) ProtoMessage() {} - -func (x *MethodConfig_RetryPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodConfig_RetryPolicy.ProtoReflect.Descriptor instead. -func (*MethodConfig_RetryPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{0, 1} -} - -func (x *MethodConfig_RetryPolicy) GetMaxAttempts() uint32 { - if x != nil { - return x.MaxAttempts - } - return 0 -} - -func (x *MethodConfig_RetryPolicy) GetInitialBackoff() *durationpb.Duration { - if x != nil { - return x.InitialBackoff - } - return nil -} - -func (x *MethodConfig_RetryPolicy) GetMaxBackoff() *durationpb.Duration { - if x != nil { - return x.MaxBackoff - } - return nil -} - -func (x *MethodConfig_RetryPolicy) GetBackoffMultiplier() float32 { - if x != nil { - return x.BackoffMultiplier - } - return 0 -} - -func (x *MethodConfig_RetryPolicy) GetRetryableStatusCodes() []code.Code { - if x != nil { - return x.RetryableStatusCodes - } - return nil -} - -// The hedging policy for outgoing RPCs. 
Hedged RPCs may execute more than -// once on the server, so only idempotent methods should specify a hedging -// policy. -type MethodConfig_HedgingPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The hedging policy will send up to max_requests RPCs. - // This number represents the total number of all attempts, including - // the original attempt. - // - // This field is required and must be greater than 1. - // Any value greater than 5 will be treated as if it were 5. - MaxAttempts uint32 `protobuf:"varint,1,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"` - // The first RPC will be sent immediately, but the max_requests-1 subsequent - // hedged RPCs will be sent at intervals of every hedging_delay. Set this - // to 0 to immediately send all max_requests RPCs. - HedgingDelay *durationpb.Duration `protobuf:"bytes,2,opt,name=hedging_delay,json=hedgingDelay,proto3" json:"hedging_delay,omitempty"` - // The set of status codes which indicate other hedged RPCs may still - // succeed. If a non-fatal status code is returned by the server, hedged - // RPCs will continue. Otherwise, outstanding requests will be canceled and - // the error returned to the client application layer. - // - // This field is optional. 
- NonFatalStatusCodes []code.Code `protobuf:"varint,3,rep,packed,name=non_fatal_status_codes,json=nonFatalStatusCodes,proto3,enum=google.rpc.Code" json:"non_fatal_status_codes,omitempty"` -} - -func (x *MethodConfig_HedgingPolicy) Reset() { - *x = MethodConfig_HedgingPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *MethodConfig_HedgingPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodConfig_HedgingPolicy) ProtoMessage() {} - -func (x *MethodConfig_HedgingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodConfig_HedgingPolicy.ProtoReflect.Descriptor instead. -func (*MethodConfig_HedgingPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{0, 2} -} - -func (x *MethodConfig_HedgingPolicy) GetMaxAttempts() uint32 { - if x != nil { - return x.MaxAttempts - } - return 0 -} - -func (x *MethodConfig_HedgingPolicy) GetHedgingDelay() *durationpb.Duration { - if x != nil { - return x.HedgingDelay - } - return nil -} - -func (x *MethodConfig_HedgingPolicy) GetNonFatalStatusCodes() []code.Code { - if x != nil { - return x.NonFatalStatusCodes - } - return nil -} - -// A map of name to child policy configuration. -// The names are used to allow the priority policy to update -// existing child policies instead of creating new ones every -// time it receives a config update. 
-type PriorityLoadBalancingPolicyConfig_Child struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Config []*LoadBalancingConfig `protobuf:"bytes,1,rep,name=config,proto3" json:"config,omitempty"` -} - -func (x *PriorityLoadBalancingPolicyConfig_Child) Reset() { - *x = PriorityLoadBalancingPolicyConfig_Child{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PriorityLoadBalancingPolicyConfig_Child) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PriorityLoadBalancingPolicyConfig_Child) ProtoMessage() {} - -func (x *PriorityLoadBalancingPolicyConfig_Child) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PriorityLoadBalancingPolicyConfig_Child.ProtoReflect.Descriptor instead. 
-func (*PriorityLoadBalancingPolicyConfig_Child) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{3, 0} -} - -func (x *PriorityLoadBalancingPolicyConfig_Child) GetConfig() []*LoadBalancingConfig { - if x != nil { - return x.Config - } - return nil -} - -type WeightedTargetLoadBalancingPolicyConfig_Target struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Weight uint32 `protobuf:"varint,1,opt,name=weight,proto3" json:"weight,omitempty"` - ChildPolicy []*LoadBalancingConfig `protobuf:"bytes,2,rep,name=child_policy,json=childPolicy,proto3" json:"child_policy,omitempty"` -} - -func (x *WeightedTargetLoadBalancingPolicyConfig_Target) Reset() { - *x = WeightedTargetLoadBalancingPolicyConfig_Target{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WeightedTargetLoadBalancingPolicyConfig_Target) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WeightedTargetLoadBalancingPolicyConfig_Target) ProtoMessage() {} - -func (x *WeightedTargetLoadBalancingPolicyConfig_Target) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WeightedTargetLoadBalancingPolicyConfig_Target.ProtoReflect.Descriptor instead. 
-func (*WeightedTargetLoadBalancingPolicyConfig_Target) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *WeightedTargetLoadBalancingPolicyConfig_Target) GetWeight() uint32 { - if x != nil { - return x.Weight - } - return 0 -} - -func (x *WeightedTargetLoadBalancingPolicyConfig_Target) GetChildPolicy() []*LoadBalancingConfig { - if x != nil { - return x.ChildPolicy - } - return nil -} - -// The locality for which this policy will report load. Required. -type LrsLoadBalancingPolicyConfig_Locality struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Region string `protobuf:"bytes,1,opt,name=region,proto3" json:"region,omitempty"` - Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"` - Subzone string `protobuf:"bytes,3,opt,name=subzone,proto3" json:"subzone,omitempty"` -} - -func (x *LrsLoadBalancingPolicyConfig_Locality) Reset() { - *x = LrsLoadBalancingPolicyConfig_Locality{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *LrsLoadBalancingPolicyConfig_Locality) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*LrsLoadBalancingPolicyConfig_Locality) ProtoMessage() {} - -func (x *LrsLoadBalancingPolicyConfig_Locality) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use LrsLoadBalancingPolicyConfig_Locality.ProtoReflect.Descriptor instead. 
-func (*LrsLoadBalancingPolicyConfig_Locality) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{9, 0} -} - -func (x *LrsLoadBalancingPolicyConfig_Locality) GetRegion() string { - if x != nil { - return x.Region - } - return "" -} - -func (x *LrsLoadBalancingPolicyConfig_Locality) GetZone() string { - if x != nil { - return x.Zone - } - return "" -} - -func (x *LrsLoadBalancingPolicyConfig_Locality) GetSubzone() string { - if x != nil { - return x.Subzone - } - return "" -} - -// If a RetryThrottlingPolicy is provided, gRPC will automatically throttle -// retry attempts and hedged RPCs when the client's ratio of failures to -// successes exceeds a threshold. -// -// For each server name, the gRPC client will maintain a token_count which is -// initially set to max_tokens. Every outgoing RPC (regardless of service or -// method invoked) will change token_count as follows: -// -// - Every failed RPC will decrement the token_count by 1. -// - Every successful RPC will increment the token_count by token_ratio. -// -// If token_count is less than or equal to max_tokens / 2, then RPCs will not -// be retried and hedged RPCs will not be sent. -type ServiceConfig_RetryThrottlingPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The number of tokens starts at max_tokens. The token_count will always be - // between 0 and max_tokens. - // - // This field is required and must be greater than zero. - MaxTokens uint32 `protobuf:"varint,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"` - // The amount of tokens to add on each successful RPC. Typically this will - // be some number between 0 and 1, e.g., 0.1. - // - // This field is required and must be greater than zero. Up to 3 decimal - // places are supported. 
- TokenRatio float32 `protobuf:"fixed32,2,opt,name=token_ratio,json=tokenRatio,proto3" json:"token_ratio,omitempty"` -} - -func (x *ServiceConfig_RetryThrottlingPolicy) Reset() { - *x = ServiceConfig_RetryThrottlingPolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceConfig_RetryThrottlingPolicy) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceConfig_RetryThrottlingPolicy) ProtoMessage() {} - -func (x *ServiceConfig_RetryThrottlingPolicy) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceConfig_RetryThrottlingPolicy.ProtoReflect.Descriptor instead. -func (*ServiceConfig_RetryThrottlingPolicy) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11, 0} -} - -func (x *ServiceConfig_RetryThrottlingPolicy) GetMaxTokens() uint32 { - if x != nil { - return x.MaxTokens - } - return 0 -} - -func (x *ServiceConfig_RetryThrottlingPolicy) GetTokenRatio() float32 { - if x != nil { - return x.TokenRatio - } - return 0 -} - -type ServiceConfig_HealthCheckConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Service name to use in the health-checking request. 
- ServiceName *wrapperspb.StringValue `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` -} - -func (x *ServiceConfig_HealthCheckConfig) Reset() { - *x = ServiceConfig_HealthCheckConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_service_config_service_config_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceConfig_HealthCheckConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceConfig_HealthCheckConfig) ProtoMessage() {} - -func (x *ServiceConfig_HealthCheckConfig) ProtoReflect() protoreflect.Message { - mi := &file_grpc_service_config_service_config_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceConfig_HealthCheckConfig.ProtoReflect.Descriptor instead. 
-func (*ServiceConfig_HealthCheckConfig) Descriptor() ([]byte, []int) { - return file_grpc_service_config_service_config_proto_rawDescGZIP(), []int{11, 1} -} - -func (x *ServiceConfig_HealthCheckConfig) GetServiceName() *wrapperspb.StringValue { - if x != nil { - return x.ServiceName - } - return nil -} - -var File_grpc_service_config_service_config_proto protoreflect.FileDescriptor - -var file_grpc_service_config_service_config_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xde, 0x08, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, - 0x72, 0x65, 0x61, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, - 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, - 0x52, 0x65, 0x61, 0x64, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, 0x19, 0x6d, 0x61, - 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x16, 0x6d, 0x61, 0x78, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x59, 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x52, - 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x58, 0x0a, 0x0e, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, - 0x64, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x68, - 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x38, 0x0a, 0x04, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0xa7, 0x02, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, - 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, - 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x42, 0x0a, 0x0f, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x69, - 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x3a, 0x0a, - 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6d, - 0x61, 0x78, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x2d, 0x0a, 0x12, 0x62, 0x61, 0x63, - 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x75, - 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, - 0x1a, 0xb9, 0x01, 0x0a, 0x0d, 0x48, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x41, 0x74, 0x74, - 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x3e, 0x0a, 0x0d, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, - 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x45, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x74, - 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x13, 0x6e, 0x6f, 0x6e, 0x46, 
0x61, 0x74, 0x61, - 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x42, 0x19, 0x0a, 0x17, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6f, 0x72, 0x5f, 0x68, 0x65, 0x64, 0x67, 0x69, 0x6e, 0x67, - 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x11, 0x0a, 0x0f, 0x50, 0x69, 0x63, 0x6b, 0x46, - 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x12, 0x0a, 0x10, 0x52, 0x6f, - 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xeb, - 0x02, 0x0a, 0x21, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, - 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x12, - 0x40, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x1a, 0x79, 
0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x52, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x68, 0x69, 0x6c, - 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x02, 0x0a, - 0x27, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x63, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0x6d, 0x0a, - 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, - 0x4b, 0x0a, 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 
0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x7f, 0x0a, 0x0c, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x59, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7e, 0x0a, - 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, - 0x0c, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x25, 0x0a, - 0x09, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 
0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x22, 0xe0, 0x02, 0x0a, 0x09, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0d, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x62, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x63, - 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, 0x66, 0x61, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x66, 0x61, 0x6c, - 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x65, - 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, - 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, - 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x88, 0x03, 0x0a, 0x1c, 0x45, 0x64, 0x73, 0x4c, - 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, - 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, 0x0a, 0x1e, - 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x60, - 0x0a, 0x17, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x69, 0x63, 0x6b, 0x69, - 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 
0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x69, 0x74, 0x79, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x60, 0x0a, 0x17, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x70, 0x69, 0x63, - 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x65, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x50, 0x69, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x22, 0xa6, 0x03, 0x0a, 0x1c, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x42, 0x0a, 0x1e, 0x6c, 0x72, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x6c, 0x72, 0x73, 0x4c, 0x6f, 0x61, - 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x56, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 
0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x72, 0x73, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, - 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4b, 0x0a, 0x0c, - 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x50, 0x0a, 0x08, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, - 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x7a, 0x6f, 0x6e, 0x65, 0x22, 0xfa, 0x05, 0x0a, 0x13, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, - 0x63, 0x6b, 0x46, 0x69, 0x72, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x0a, 0x70, 0x69, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0b, 0x72, - 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x72, 0x6f, 
0x62, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x6f, 0x62, 0x69, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x6e, 0x64, - 0x5f, 0x72, 0x6f, 0x62, 0x69, 0x6e, 0x12, 0x3b, 0x0a, 0x06, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x72, 0x70, - 0x63, 0x4c, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x67, 0x72, 0x70, - 0x63, 0x6c, 0x62, 0x12, 0x54, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x72, 0x69, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x0f, 0x77, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, - 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x48, 0x00, 0x52, 0x0e, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x12, 0x32, 0x0a, 0x03, 0x63, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 
0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, - 0x00, 0x52, 0x03, 0x63, 0x64, 0x73, 0x12, 0x45, 0x0a, 0x03, 0x65, 0x64, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x64, 0x73, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x03, 0x65, 0x64, 0x73, 0x12, 0x45, 0x0a, - 0x03, 0x6c, 0x72, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x4c, 0x72, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, - 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, - 0x03, 0x6c, 0x72, 0x73, 0x12, 0x36, 0x0a, 0x03, 0x78, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x03, 0x78, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x10, - 0x78, 0x64, 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x58, 0x64, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x10, 0x78, 0x64, - 0x73, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x08, - 0x0a, 0x06, 0x70, 
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd8, 0x05, 0x0a, 0x0d, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6e, 0x0a, 0x15, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x5c, 0x0a, 0x15, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x13, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x63, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, - 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x38, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x64, 0x0a, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x15, 0x52, - 0x65, 0x74, 0x72, 0x79, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x52, - 0x61, 0x74, 0x69, 0x6f, 0x1a, 0x54, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3f, 0x0a, 0x0c, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, - 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x37, 0x0a, 0x13, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x5f, 0x52, 0x4f, 0x42, 0x49, - 0x4e, 0x10, 0x01, 0x42, 0x2d, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x12, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_service_config_service_config_proto_rawDescOnce sync.Once - file_grpc_service_config_service_config_proto_rawDescData = file_grpc_service_config_service_config_proto_rawDesc -) - -func file_grpc_service_config_service_config_proto_rawDescGZIP() []byte { - file_grpc_service_config_service_config_proto_rawDescOnce.Do(func() { - file_grpc_service_config_service_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_service_config_service_config_proto_rawDescData) - }) - return file_grpc_service_config_service_config_proto_rawDescData -} - -var file_grpc_service_config_service_config_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_grpc_service_config_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 22) -var file_grpc_service_config_service_config_proto_goTypes = []interface{}{ - (ServiceConfig_LoadBalancingPolicy)(0), // 0: grpc.service_config.ServiceConfig.LoadBalancingPolicy - (*MethodConfig)(nil), // 1: grpc.service_config.MethodConfig - (*PickFirstConfig)(nil), // 2: grpc.service_config.PickFirstConfig - (*RoundRobinConfig)(nil), // 3: grpc.service_config.RoundRobinConfig - 
(*PriorityLoadBalancingPolicyConfig)(nil), // 4: grpc.service_config.PriorityLoadBalancingPolicyConfig - (*WeightedTargetLoadBalancingPolicyConfig)(nil), // 5: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - (*GrpcLbConfig)(nil), // 6: grpc.service_config.GrpcLbConfig - (*CdsConfig)(nil), // 7: grpc.service_config.CdsConfig - (*XdsConfig)(nil), // 8: grpc.service_config.XdsConfig - (*EdsLoadBalancingPolicyConfig)(nil), // 9: grpc.service_config.EdsLoadBalancingPolicyConfig - (*LrsLoadBalancingPolicyConfig)(nil), // 10: grpc.service_config.LrsLoadBalancingPolicyConfig - (*LoadBalancingConfig)(nil), // 11: grpc.service_config.LoadBalancingConfig - (*ServiceConfig)(nil), // 12: grpc.service_config.ServiceConfig - (*MethodConfig_Name)(nil), // 13: grpc.service_config.MethodConfig.Name - (*MethodConfig_RetryPolicy)(nil), // 14: grpc.service_config.MethodConfig.RetryPolicy - (*MethodConfig_HedgingPolicy)(nil), // 15: grpc.service_config.MethodConfig.HedgingPolicy - (*PriorityLoadBalancingPolicyConfig_Child)(nil), // 16: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - nil, // 17: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - (*WeightedTargetLoadBalancingPolicyConfig_Target)(nil), // 18: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - nil, // 19: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - (*LrsLoadBalancingPolicyConfig_Locality)(nil), // 20: grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - (*ServiceConfig_RetryThrottlingPolicy)(nil), // 21: grpc.service_config.ServiceConfig.RetryThrottlingPolicy - (*ServiceConfig_HealthCheckConfig)(nil), // 22: grpc.service_config.ServiceConfig.HealthCheckConfig - (*wrapperspb.BoolValue)(nil), // 23: google.protobuf.BoolValue - (*durationpb.Duration)(nil), // 24: google.protobuf.Duration - (*wrapperspb.UInt32Value)(nil), // 25: google.protobuf.UInt32Value - (*wrapperspb.StringValue)(nil), // 26: google.protobuf.StringValue - 
(code.Code)(0), // 27: google.rpc.Code -} -var file_grpc_service_config_service_config_proto_depIdxs = []int32{ - 13, // 0: grpc.service_config.MethodConfig.name:type_name -> grpc.service_config.MethodConfig.Name - 23, // 1: grpc.service_config.MethodConfig.wait_for_ready:type_name -> google.protobuf.BoolValue - 24, // 2: grpc.service_config.MethodConfig.timeout:type_name -> google.protobuf.Duration - 25, // 3: grpc.service_config.MethodConfig.max_request_message_bytes:type_name -> google.protobuf.UInt32Value - 25, // 4: grpc.service_config.MethodConfig.max_response_message_bytes:type_name -> google.protobuf.UInt32Value - 14, // 5: grpc.service_config.MethodConfig.retry_policy:type_name -> grpc.service_config.MethodConfig.RetryPolicy - 15, // 6: grpc.service_config.MethodConfig.hedging_policy:type_name -> grpc.service_config.MethodConfig.HedgingPolicy - 17, // 7: grpc.service_config.PriorityLoadBalancingPolicyConfig.children:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry - 19, // 8: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.targets:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry - 11, // 9: grpc.service_config.GrpcLbConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 11, // 10: grpc.service_config.XdsConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 11, // 11: grpc.service_config.XdsConfig.fallback_policy:type_name -> grpc.service_config.LoadBalancingConfig - 26, // 12: grpc.service_config.XdsConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 26, // 13: grpc.service_config.EdsLoadBalancingPolicyConfig.lrs_load_reporting_server_name:type_name -> google.protobuf.StringValue - 11, // 14: grpc.service_config.EdsLoadBalancingPolicyConfig.locality_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 11, // 15: 
grpc.service_config.EdsLoadBalancingPolicyConfig.endpoint_picking_policy:type_name -> grpc.service_config.LoadBalancingConfig - 20, // 16: grpc.service_config.LrsLoadBalancingPolicyConfig.locality:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig.Locality - 11, // 17: grpc.service_config.LrsLoadBalancingPolicyConfig.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 2, // 18: grpc.service_config.LoadBalancingConfig.pick_first:type_name -> grpc.service_config.PickFirstConfig - 3, // 19: grpc.service_config.LoadBalancingConfig.round_robin:type_name -> grpc.service_config.RoundRobinConfig - 6, // 20: grpc.service_config.LoadBalancingConfig.grpclb:type_name -> grpc.service_config.GrpcLbConfig - 4, // 21: grpc.service_config.LoadBalancingConfig.priority:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig - 5, // 22: grpc.service_config.LoadBalancingConfig.weighted_target:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig - 7, // 23: grpc.service_config.LoadBalancingConfig.cds:type_name -> grpc.service_config.CdsConfig - 9, // 24: grpc.service_config.LoadBalancingConfig.eds:type_name -> grpc.service_config.EdsLoadBalancingPolicyConfig - 10, // 25: grpc.service_config.LoadBalancingConfig.lrs:type_name -> grpc.service_config.LrsLoadBalancingPolicyConfig - 8, // 26: grpc.service_config.LoadBalancingConfig.xds:type_name -> grpc.service_config.XdsConfig - 8, // 27: grpc.service_config.LoadBalancingConfig.xds_experimental:type_name -> grpc.service_config.XdsConfig - 0, // 28: grpc.service_config.ServiceConfig.load_balancing_policy:type_name -> grpc.service_config.ServiceConfig.LoadBalancingPolicy - 11, // 29: grpc.service_config.ServiceConfig.load_balancing_config:type_name -> grpc.service_config.LoadBalancingConfig - 1, // 30: grpc.service_config.ServiceConfig.method_config:type_name -> grpc.service_config.MethodConfig - 21, // 31: grpc.service_config.ServiceConfig.retry_throttling:type_name -> 
grpc.service_config.ServiceConfig.RetryThrottlingPolicy - 22, // 32: grpc.service_config.ServiceConfig.health_check_config:type_name -> grpc.service_config.ServiceConfig.HealthCheckConfig - 24, // 33: grpc.service_config.MethodConfig.RetryPolicy.initial_backoff:type_name -> google.protobuf.Duration - 24, // 34: grpc.service_config.MethodConfig.RetryPolicy.max_backoff:type_name -> google.protobuf.Duration - 27, // 35: grpc.service_config.MethodConfig.RetryPolicy.retryable_status_codes:type_name -> google.rpc.Code - 24, // 36: grpc.service_config.MethodConfig.HedgingPolicy.hedging_delay:type_name -> google.protobuf.Duration - 27, // 37: grpc.service_config.MethodConfig.HedgingPolicy.non_fatal_status_codes:type_name -> google.rpc.Code - 11, // 38: grpc.service_config.PriorityLoadBalancingPolicyConfig.Child.config:type_name -> grpc.service_config.LoadBalancingConfig - 16, // 39: grpc.service_config.PriorityLoadBalancingPolicyConfig.ChildrenEntry.value:type_name -> grpc.service_config.PriorityLoadBalancingPolicyConfig.Child - 11, // 40: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target.child_policy:type_name -> grpc.service_config.LoadBalancingConfig - 18, // 41: grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.TargetsEntry.value:type_name -> grpc.service_config.WeightedTargetLoadBalancingPolicyConfig.Target - 26, // 42: grpc.service_config.ServiceConfig.HealthCheckConfig.service_name:type_name -> google.protobuf.StringValue - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // [0:43] is the sub-list for field type_name -} - -func init() { file_grpc_service_config_service_config_proto_init() } -func file_grpc_service_config_service_config_proto_init() { - if File_grpc_service_config_service_config_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - 
file_grpc_service_config_service_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PickFirstConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RoundRobinConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PriorityLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WeightedTargetLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GrpcLbConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CdsConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_grpc_service_config_service_config_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*XdsConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EdsLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LrsLoadBalancingPolicyConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LoadBalancingConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_Name); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_RetryPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_grpc_service_config_service_config_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MethodConfig_HedgingPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PriorityLoadBalancingPolicyConfig_Child); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WeightedTargetLoadBalancingPolicyConfig_Target); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*LrsLoadBalancingPolicyConfig_Locality); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConfig_RetryThrottlingPolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_service_config_service_config_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConfig_HealthCheckConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_service_config_service_config_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*MethodConfig_RetryPolicy_)(nil), - (*MethodConfig_HedgingPolicy_)(nil), - } - 
file_grpc_service_config_service_config_proto_msgTypes[10].OneofWrappers = []interface{}{ - (*LoadBalancingConfig_PickFirst)(nil), - (*LoadBalancingConfig_RoundRobin)(nil), - (*LoadBalancingConfig_Grpclb)(nil), - (*LoadBalancingConfig_Priority)(nil), - (*LoadBalancingConfig_WeightedTarget)(nil), - (*LoadBalancingConfig_Cds)(nil), - (*LoadBalancingConfig_Eds)(nil), - (*LoadBalancingConfig_Lrs)(nil), - (*LoadBalancingConfig_Xds)(nil), - (*LoadBalancingConfig_XdsExperimental)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_service_config_service_config_proto_rawDesc, - NumEnums: 1, - NumMessages: 22, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_grpc_service_config_service_config_proto_goTypes, - DependencyIndexes: file_grpc_service_config_service_config_proto_depIdxs, - EnumInfos: file_grpc_service_config_service_config_proto_enumTypes, - MessageInfos: file_grpc_service_config_service_config_proto_msgTypes, - }.Build() - File_grpc_service_config_service_config_proto = out.File - file_grpc_service_config_service_config_proto_rawDesc = nil - file_grpc_service_config_service_config_proto_goTypes = nil - file_grpc_service_config_service_config_proto_depIdxs = nil -} diff --git a/internal/resolver/config_selector.go b/internal/resolver/config_selector.go index 5e7f36703d4b..c7a18a948adb 100644 --- a/internal/resolver/config_selector.go +++ b/internal/resolver/config_selector.go @@ -117,9 +117,12 @@ type ClientInterceptor interface { NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) } -// ServerInterceptor is unimplementable; do not use. +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. 
type ServerInterceptor interface { - notDefined() + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. } type csKeyType string @@ -129,7 +132,7 @@ const csKey = csKeyType("grpc.internal.resolver.configSelector") // SetConfigSelector sets the config selector in state and returns the new // state. func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { - state.Attributes = state.Attributes.WithValues(csKey, cs) + state.Attributes = state.Attributes.WithValue(csKey, cs) return state } diff --git a/internal/resolver/config_selector_test.go b/internal/resolver/config_selector_test.go index e5a50995df11..7a8a5dbd693f 100644 --- a/internal/resolver/config_selector_test.go +++ b/internal/resolver/config_selector_test.go @@ -48,6 +48,8 @@ func (s) TestSafeConfigSelector(t *testing.T) { retChan1 := make(chan *RPCConfig) retChan2 := make(chan *RPCConfig) + defer close(retChan1) + defer close(retChan2) one := 1 two := 2 @@ -55,8 +57,8 @@ func (s) TestSafeConfigSelector(t *testing.T) { resp1 := &RPCConfig{MethodConfig: serviceconfig.MethodConfig{MaxReqSize: &one}} resp2 := &RPCConfig{MethodConfig: serviceconfig.MethodConfig{MaxReqSize: &two}} - cs1Called := make(chan struct{}) - cs2Called := make(chan struct{}) + cs1Called := make(chan struct{}, 1) + cs2Called := make(chan struct{}, 1) cs1 := &fakeConfigSelector{ selectConfig: func(r RPCInfo) (*RPCConfig, error) { @@ -110,7 +112,7 @@ func (s) TestSafeConfigSelector(t *testing.T) { cs1Done := false // set when cs2 is first called for dl := time.Now().Add(150 * time.Millisecond); !time.Now().After(dl); { - gotConfigChan := make(chan *RPCConfig) + gotConfigChan := make(chan *RPCConfig, 1) go func() { cfg, _ := scs.SelectConfig(testRPCInfo) gotConfigChan <- 
cfg diff --git a/internal/resolver/dns/dns_resolver.go b/internal/resolver/dns/dns_resolver.go index 304235566589..99e1e5b36c89 100644 --- a/internal/resolver/dns/dns_resolver.go +++ b/internal/resolver/dns/dns_resolver.go @@ -34,6 +34,7 @@ import ( grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" @@ -46,6 +47,13 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) + func init() { resolver.Register(NewBuilder()) } @@ -54,7 +62,8 @@ const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. 
@@ -78,14 +87,14 @@ var ( minDNSResRate = 30 * time.Second ) -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) + return dialer.DialContext(ctx, network, address) } } -var customAuthorityResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (netResolver, error) { host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -95,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), + Dial: addressDialer(authorityWithPort), }, nil } @@ -106,9 +115,10 @@ func NewBuilder() resolver.Builder { type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of the target. +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. 
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) + host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { return nil, err } @@ -132,10 +142,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = newNetResolver(target.URL.Host) if err != nil { return nil, err } @@ -143,7 +153,6 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts d.wg.Add(1) go d.watcher() - d.ResolveNow(resolver.ResolveNowOptions{}) return d, nil } @@ -173,19 +182,22 @@ type dnsResolver struct { ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. - // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes - // will warns lookup (READ the lookup function pointers) inside watcher() goroutine - // has data race with replaceNetFunc (WRITE the lookup function pointers). + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. 
If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup disableServiceConfig bool } -// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: @@ -201,28 +213,39 @@ func (d *dnsResolver) Close() { func (d *dnsResolver) watcher() { defer d.wg.Done() + backoffIndex := 1 for { - select { - case <-d.ctx.Done(): - return - case <-d.rn: - } - state, err := d.lookup() if err != nil { + // Report error to the underlying grpc.ClientConn. d.cc.ReportError(err) } else { - d.cc.UpdateState(*state) + err = d.cc.UpdateState(*state) } - // Sleep to prevent excessive re-resolutions. Incoming resolution requests - // will be queued in d.rn. - t := time.NewTimer(minDNSResRate) + var timer *time.Timer + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. + backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. 
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } select { - case <-t.C: case <-d.ctx.Done(): - t.Stop() + timer.Stop() return + case <-timer.C: } } } @@ -260,18 +283,14 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { return newAddrs, nil } -var filterError = func(err error) error { - if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { +func handleDNSError(err error, lookupType string) error { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). return nil } - return err -} - -func handleDNSError(err error, lookupType string) error { - err = filterError(err) if err != nil { err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) logger.Info(err) @@ -295,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { res += s } - // TXT record must have "grpc_config=" attribute in order to be used as service config. + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. if !strings.HasPrefix(res, txtAttribute) { logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service config. + // This is not an error; it is the equivalent of not having a service + // config. 
return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -306,12 +327,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { } func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err } + newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { ip, ok := formatIP(a) if !ok { @@ -340,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -354,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. 
// examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -373,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/internal/resolver/dns/dns_resolver_test.go b/internal/resolver/dns/dns_resolver_test.go index 1c8469a275a7..a66ffffd3ce1 100644 --- a/internal/resolver/dns/dns_resolver_test.go +++ b/internal/resolver/dns/dns_resolver_test.go @@ -30,9 +30,13 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/balancer" grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/leakcheck" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -41,13 +45,15 @@ func TestMain(m *testing.M) { // Set a non-zero duration only for tests which are actually testing that // feature. 
replaceDNSResRate(time.Duration(0)) // No nead to clean up since we os.Exit - replaceNetFunc(nil) // No nead to clean up since we os.Exit + overrideDefaultResolver(false) // No nead to clean up since we os.Exit code := m.Run() os.Exit(code) } const ( - txtBytesLimit = 255 + txtBytesLimit = 255 + defaultTestTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond ) type testClientConn struct { @@ -57,13 +63,17 @@ type testClientConn struct { state resolver.State updateStateCalls int errChan chan error + updateStateErr error } -func (t *testClientConn) UpdateState(s resolver.State) { +func (t *testClientConn) UpdateState(s resolver.State) error { t.m1.Lock() defer t.m1.Unlock() t.state = s t.updateStateCalls++ + // This error determines whether DNS Resolver actually decides to exponentially backoff or not. + // This can be any error. + return t.updateStateErr } func (t *testClientConn) getState() (resolver.State, int) { @@ -99,12 +109,12 @@ type testResolver struct { // A write to this channel is made when this resolver receives a resolution // request. Tests can rely on reading from this channel to be notified about // resolution requests instead of sleeping for a predefined period of time. - ch chan struct{} + lookupHostCh *testutils.Channel } func (tr *testResolver) LookupHost(ctx context.Context, host string) ([]string, error) { - if tr.ch != nil { - tr.ch <- struct{}{} + if tr.lookupHostCh != nil { + tr.lookupHostCh.Send(nil) } return hostLookup(host) } @@ -117,9 +127,17 @@ func (*testResolver) LookupTXT(ctx context.Context, host string) ([]string, erro return txtLookup(host) } -func replaceNetFunc(ch chan struct{}) func() { +// overrideDefaultResolver overrides the defaultResolver used by the code with +// an instance of the testResolver. pushOnLookup controls whether the +// testResolver created here pushes lookupHost events on its channel. 
+func overrideDefaultResolver(pushOnLookup bool) func() { oldResolver := defaultResolver - defaultResolver = &testResolver{ch: ch} + + var lookupHostCh *testutils.Channel + if pushOnLookup { + lookupHostCh = testutils.NewChannel() + } + defaultResolver = &testResolver{lookupHostCh: lookupHostCh} return func() { defaultResolver = oldResolver @@ -669,6 +687,13 @@ func TestResolve(t *testing.T) { func testDNSResolver(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(_ time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } tests := []struct { target string addrWant []resolver.Address @@ -709,7 +734,7 @@ func testDNSResolver(t *testing.T) { for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} - r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -725,7 +750,7 @@ func testDNSResolver(t *testing.T) { if cnt == 0 { t.Fatalf("UpdateState not called after 2s; aborting") } - if !reflect.DeepEqual(a.addrWant, state.Addresses) { + if !cmp.Equal(a.addrWant, state.Addresses, cmpopts.EquateEmpty()) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v", a.target, state.Addresses, a.addrWant) } sc := scFromState(state) @@ -736,12 +761,151 @@ func testDNSResolver(t *testing.T) { } } +// DNS Resolver immediately starts polling on an error from grpc. This should continue until the ClientConn doesn't +// send back an error from updating the DNS Resolver's state. 
+func TestDNSResolverExponentialBackoff(t *testing.T) { + defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + timerChan := testutils.NewChannel() + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, allows this test to call timer immediately. + t := time.NewTimer(time.Hour) + timerChan.Send(t) + return t + } + tests := []struct { + name string + target string + addrWant []resolver.Address + scWant string + }{ + { + "happy case default port", + "foo.bar.com", + []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}}, + generateSC("foo.bar.com"), + }, + { + "happy case specified port", + "foo.bar.com:1234", + []resolver.Address{{Addr: "1.2.3.4:1234"}, {Addr: "5.6.7.8:1234"}}, + generateSC("foo.bar.com"), + }, + { + "happy case another default port", + "srv.ipv4.single.fake", + []resolver.Address{{Addr: "2.4.6.8" + colonDefaultPort}}, + generateSC("srv.ipv4.single.fake"), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b := NewBuilder() + cc := &testClientConn{target: test.target} + // Cause ClientConn to return an error. 
+ cc.updateStateErr = balancer.ErrBadResolverState + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", test.target))}, cc, resolver.BuildOptions{}) + if err != nil { + t.Fatalf("Error building resolver for target %v: %v", test.target, err) + } + var state resolver.State + var cnt int + for i := 0; i < 2000; i++ { + state, cnt = cc.getState() + if cnt > 0 { + break + } + time.Sleep(time.Millisecond) + } + if cnt == 0 { + t.Fatalf("UpdateState not called after 2s; aborting") + } + if !reflect.DeepEqual(test.addrWant, state.Addresses) { + t.Errorf("Resolved addresses of target: %q = %+v, want %+v", test.target, state.Addresses, test.addrWant) + } + sc := scFromState(state) + if test.scWant != sc { + t.Errorf("Resolved service config of target: %q = %+v, want %+v", test.target, sc, test.scWant) + } + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + // Cause timer to go off 10 times, and see if it calls updateState() correctly. + for i := 0; i < 10; i++ { + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) + } + // Poll to see if DNS Resolver updated state the correct number of times, which allows time for the DNS Resolver to call + // ClientConn update state. + deadline := time.Now().Add(defaultTestTimeout) + for { + cc.m1.Lock() + got := cc.updateStateCalls + cc.m1.Unlock() + if got == 11 { + break + } + + if time.Now().After(deadline) { + t.Fatalf("Exponential backoff is not working as expected - should update state 11 times instead of %d", got) + } + + time.Sleep(time.Millisecond) + } + + // Update resolver.ClientConn to not return an error anymore - this should stop it from backing off. 
+ cc.updateStateErr = nil + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) + // Poll to see if DNS Resolver updated state the correct number of times, which allows time for the DNS Resolver to call + // ClientConn update state the final time. The DNS Resolver should then stop polling. + deadline = time.Now().Add(defaultTestTimeout) + for { + cc.m1.Lock() + got := cc.updateStateCalls + cc.m1.Unlock() + if got == 12 { + break + } + + if time.Now().After(deadline) { + t.Fatalf("Exponential backoff is not working as expected - should stop backing off at 12 total UpdateState calls instead of %d", got) + } + + _, err := timerChan.ReceiveOrFail() + if err { + t.Fatalf("Should not poll again after Client Conn stops returning error.") + } + + time.Sleep(time.Millisecond) + } + r.Close() + }) + } +} + func testDNSResolverWithSRV(t *testing.T) { EnableSRVLookups = true defer func() { EnableSRVLookups = false }() defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(_ time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. 
+ return time.NewTimer(time.Hour) + } tests := []struct { target string addrWant []resolver.Address @@ -797,7 +961,7 @@ func testDNSResolverWithSRV(t *testing.T) { for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} - r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -814,7 +978,7 @@ func testDNSResolverWithSRV(t *testing.T) { if cnt == 0 { t.Fatalf("UpdateState not called after 2s; aborting") } - if !reflect.DeepEqual(a.addrWant, state.Addresses) { + if !cmp.Equal(a.addrWant, state.Addresses, cmpopts.EquateEmpty()) { t.Errorf("Resolved addresses of target: %q = %+v, want %+v", a.target, state.Addresses, a.addrWant) } gs := grpclbstate.Get(state) @@ -855,6 +1019,13 @@ func mutateTbl(target string) func() { func testDNSResolveNow(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(_ time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. 
+ return time.NewTimer(time.Hour) + } tests := []struct { target string addrWant []resolver.Address @@ -874,7 +1045,7 @@ func testDNSResolveNow(t *testing.T) { for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} - r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -926,6 +1097,13 @@ const colonDefaultPort = ":" + defaultPort func testIPResolver(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(_ time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } tests := []struct { target string want []resolver.Address @@ -945,7 +1123,7 @@ func testIPResolver(t *testing.T) { for _, v := range tests { b := NewBuilder() cc := &testClientConn{target: v.target} - r, err := b.Build(resolver.Target{Endpoint: v.target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", v.target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -975,6 +1153,13 @@ func testIPResolver(t *testing.T) { func TestResolveFunc(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. 
+ return time.NewTimer(time.Hour) + } tests := []struct { addr string want error @@ -989,7 +1174,7 @@ func TestResolveFunc(t *testing.T) { {"[2001:db8:a0b:12f0::1]:21", nil}, {":80", nil}, {"127.0.0...1:12345", nil}, - {"[fe80::1%lo0]:80", nil}, + {"[fe80::1%25lo0]:80", nil}, {"golang.org:http", nil}, {"[2001:db8::1]:http", nil}, {"[2001:db8::1]:", errEndsWithColon}, @@ -1001,7 +1186,7 @@ func TestResolveFunc(t *testing.T) { b := NewBuilder() for _, v := range tests { cc := &testClientConn{target: v.addr, errChan: make(chan error, 1)} - r, err := b.Build(resolver.Target{Endpoint: v.addr}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", v.addr))}, cc, resolver.BuildOptions{}) if err == nil { r.Close() } @@ -1013,6 +1198,13 @@ func TestResolveFunc(t *testing.T) { func TestDisableServiceConfig(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. 
+ return time.NewTimer(time.Hour) + } tests := []struct { target string scWant string @@ -1033,7 +1225,7 @@ func TestDisableServiceConfig(t *testing.T) { for _, a := range tests { b := NewBuilder() cc := &testClientConn{target: a.target} - r, err := b.Build(resolver.Target{Endpoint: a.target}, cc, resolver.BuildOptions{DisableServiceConfig: a.disableServiceConfig}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", a.target))}, cc, resolver.BuildOptions{DisableServiceConfig: a.disableServiceConfig}) if err != nil { t.Fatalf("%v\n", err) } @@ -1059,12 +1251,19 @@ func TestDisableServiceConfig(t *testing.T) { func TestTXTError(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } defer func(v bool) { envconfig.TXTErrIgnore = v }(envconfig.TXTErrIgnore) for _, ignore := range []bool{false, true} { envconfig.TXTErrIgnore = ignore b := NewBuilder() cc := &testClientConn{target: "ipv4.single.fake"} // has A records but not TXT records. - r, err := b.Build(resolver.Target{Endpoint: "ipv4.single.fake"}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", "ipv4.single.fake"))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -1090,10 +1289,17 @@ func TestTXTError(t *testing.T) { } func TestDNSResolverRetry(t *testing.T) { + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. 
+ return time.NewTimer(time.Hour) + } b := NewBuilder() target := "ipv4.single.fake" cc := &testClientConn{target: target} - r, err := b.Build(resolver.Target{Endpoint: target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("%v\n", err) } @@ -1144,6 +1350,13 @@ func TestDNSResolverRetry(t *testing.T) { func TestCustomAuthority(t *testing.T) { defer leakcheck.Check(t) + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential backoff. + return time.NewTimer(time.Hour) + } tests := []struct { authority string @@ -1206,14 +1419,14 @@ func TestCustomAuthority(t *testing.T) { true, }, } - oldCustomAuthorityDialler := customAuthorityDialler + oldAddressDialer := addressDialer defer func() { - customAuthorityDialler = oldCustomAuthorityDialler + addressDialer = oldAddressDialer }() for _, a := range tests { errChan := make(chan error, 1) - customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { + addressDialer = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { if authority != a.authorityWant { errChan <- fmt.Errorf("wrong custom authority passed to resolver. 
input: %s expected: %s actual: %s", a.authority, a.authorityWant, authority) } else { @@ -1224,9 +1437,13 @@ func TestCustomAuthority(t *testing.T) { } } + mockEndpointTarget := "foo.bar.com" b := NewBuilder() - cc := &testClientConn{target: "foo.bar.com", errChan: make(chan error, 1)} - r, err := b.Build(resolver.Target{Endpoint: "foo.bar.com", Authority: a.authority}, cc, resolver.BuildOptions{}) + cc := &testClientConn{target: mockEndpointTarget, errChan: make(chan error, 1)} + target := resolver.Target{ + URL: *testutils.MustParseURL(fmt.Sprintf("scheme://%s/%s", a.authority, mockEndpointTarget)), + } + r, err := b.Build(target, cc, resolver.BuildOptions{}) if err == nil { r.Close() @@ -1249,23 +1466,40 @@ func TestCustomAuthority(t *testing.T) { // requests. It sets the re-resolution rate to a small value and repeatedly // calls ResolveNow() and ensures only the expected number of resolution // requests are made. + func TestRateLimitedResolve(t *testing.T) { defer leakcheck.Check(t) - - const dnsResRate = 10 * time.Millisecond - dc := replaceDNSResRate(dnsResRate) - defer dc() + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, will protect from triggering exponential + // backoff. + return time.NewTimer(time.Hour) + } + defer func(nt func(d time.Duration) *time.Timer) { + newTimerDNSResRate = nt + }(newTimerDNSResRate) + + timerChan := testutils.NewChannel() + newTimerDNSResRate = func(d time.Duration) *time.Timer { + // Will never fire on its own, allows this test to call timer + // immediately. + t := time.NewTimer(time.Hour) + timerChan.Send(t) + return t + } // Create a new testResolver{} for this test because we want the exact count // of the number of times the resolver was invoked. 
- nc := replaceNetFunc(make(chan struct{})) + nc := overrideDefaultResolver(true) defer nc() target := "foo.bar.com" b := NewBuilder() cc := &testClientConn{target: target} - r, err := b.Build(resolver.Target{Endpoint: target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{}) if err != nil { t.Fatalf("resolver.Build() returned error: %v\n", err) } @@ -1281,55 +1515,65 @@ func TestRateLimitedResolve(t *testing.T) { t.Fatalf("delegate resolver returned unexpected type: %T\n", tr) } - // Observe the time before unblocking the lookupHost call. The 100ms rate - // limiting timer will begin immediately after that. This means the next - // resolution could happen less than 100ms if we read the time *after* - // receiving from tr.ch - start := time.Now() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() // Wait for the first resolution request to be done. This happens as part - // of the first iteration of the for loop in watcher() because we call - // ResolveNow in Build. - <-tr.ch - - // Here we start a couple of goroutines. One repeatedly calls ResolveNow() - // until asked to stop, and the other waits for two resolution requests to be - // made to our testResolver and stops the former. We measure the start and - // end times, and expect the duration elapsed to be in the interval - // {wantCalls*dnsResRate, wantCalls*dnsResRate} - done := make(chan struct{}) - go func() { - for { - select { - case <-done: - return - default: - r.ResolveNow(resolver.ResolveNowOptions{}) - time.Sleep(1 * time.Millisecond) - } - } - }() + // of the first iteration of the for loop in watcher(). 
+ if _, err := tr.lookupHostCh.Receive(ctx); err != nil { + t.Fatalf("Timed out waiting for lookup() call.") + } - gotCalls := 0 - const wantCalls = 3 - min, max := wantCalls*dnsResRate, (wantCalls+1)*dnsResRate - tMax := time.NewTimer(max) - for gotCalls != wantCalls { - select { - case <-tr.ch: - gotCalls++ - case <-tMax.C: - t.Fatalf("Timed out waiting for %v calls after %v; got %v", wantCalls, max, gotCalls) - } + // Call Resolve Now 100 times, shouldn't continue onto next iteration of + // watcher, thus shouldn't lookup again. + for i := 0; i <= 100; i++ { + r.ResolveNow(resolver.ResolveNowOptions{}) } - close(done) - elapsed := time.Since(start) - if gotCalls != wantCalls { - t.Fatalf("resolve count mismatch for target: %q = %+v, want %+v\n", target, gotCalls, wantCalls) + continueCtx, continueCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer continueCancel() + + if _, err := tr.lookupHostCh.Receive(continueCtx); err == nil { + t.Fatalf("Should not have looked up again as DNS Min Res Rate timer has not gone off.") } - if elapsed < min { - t.Fatalf("elapsed time: %v, wanted it to be between {%v and %v}", elapsed, min, max) + + // Make the DNSMinResRate timer fire immediately (by receiving it, then + // resetting to 0), this will unblock the resolver which is currently + // blocked on the DNS Min Res Rate timer going off, which will allow it to + // continue to the next iteration of the watcher loop. + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) + + // Now that DNS Min Res Rate timer has gone off, it should lookup again. + if _, err := tr.lookupHostCh.Receive(ctx); err != nil { + t.Fatalf("Timed out waiting for lookup() call.") + } + + // Resolve Now 1000 more times, shouldn't lookup again as DNS Min Res Rate + // timer has not gone off. 
+ for i := 0; i < 1000; i++ { + r.ResolveNow(resolver.ResolveNowOptions{}) + } + + if _, err = tr.lookupHostCh.Receive(continueCtx); err == nil { + t.Fatalf("Should not have looked up again as DNS Min Res Rate timer has not gone off.") + } + + // Make the DNSMinResRate timer fire immediately again. + timer, err = timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer = timer.(*time.Timer) + timerPointer.Reset(0) + + // Now that DNS Min Res Rate timer has gone off, it should lookup again. + if _, err = tr.lookupHostCh.Receive(ctx); err != nil { + t.Fatalf("Timed out waiting for lookup() call.") } wantAddrs := []resolver.Address{{Addr: "1.2.3.4" + colonDefaultPort}, {Addr: "5.6.7.8" + colonDefaultPort}} @@ -1347,21 +1591,66 @@ func TestRateLimitedResolve(t *testing.T) { } } +// DNS Resolver immediately starts polling on an error. This will cause the re-resolution to return another error. +// Thus, test that it constantly sends errors to the grpc.ClientConn. func TestReportError(t *testing.T) { const target = "notfoundaddress" + defer func(nt func(d time.Duration) *time.Timer) { + newTimer = nt + }(newTimer) + timerChan := testutils.NewChannel() + newTimer = func(d time.Duration) *time.Timer { + // Will never fire on its own, allows this test to call timer immediately. + t := time.NewTimer(time.Hour) + timerChan.Send(t) + return t + } cc := &testClientConn{target: target, errChan: make(chan error)} + totalTimesCalledError := 0 b := NewBuilder() - r, err := b.Build(resolver.Target{Endpoint: target}, cc, resolver.BuildOptions{}) + r, err := b.Build(resolver.Target{URL: *testutils.MustParseURL(fmt.Sprintf("scheme:///%s", target))}, cc, resolver.BuildOptions{}) if err != nil { - t.Fatalf("%v\n", err) + t.Fatalf("Error building resolver for target %v: %v", target, err) + } + // Should receive first error. 
+ err = <-cc.errChan + if !strings.Contains(err.Error(), "hostLookup error") { + t.Fatalf(`ReportError(err=%v) called; want err contains "hostLookupError"`, err) } + totalTimesCalledError++ + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) defer r.Close() - select { - case err := <-cc.errChan: + + // Cause timer to go off 10 times, and see if it matches DNS Resolver updating Error. + for i := 0; i < 10; i++ { + // Should call ReportError(). + err = <-cc.errChan if !strings.Contains(err.Error(), "hostLookup error") { t.Fatalf(`ReportError(err=%v) called; want err contains "hostLookupError"`, err) } - case <-time.After(time.Second): - t.Fatalf("did not receive error after 1s") + totalTimesCalledError++ + timer, err := timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) + } + timerPointer := timer.(*time.Timer) + timerPointer.Reset(0) + } + + if totalTimesCalledError != 11 { + t.Errorf("ReportError() not called 11 times, instead called %d times.", totalTimesCalledError) + } + // Clean up final watcher iteration. + <-cc.errChan + _, err = timerChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving timer from mock NewTimer call: %v", err) } } diff --git a/internal/resolver/passthrough/passthrough.go b/internal/resolver/passthrough/passthrough.go index 520d9229e1ed..afac56572ad5 100644 --- a/internal/resolver/passthrough/passthrough.go +++ b/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. 
package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, @@ -45,7 +52,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} diff --git a/internal/resolver/unix/unix.go b/internal/resolver/unix/unix.go index 0d5a811ddfad..160911687738 100644 --- a/internal/resolver/unix/unix.go +++ b/internal/resolver/unix/unix.go @@ -34,13 +34,24 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } - addr := resolver.Address{Addr: target.Endpoint} + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. 
+ endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} if b.scheme == unixAbstractScheme { - // prepend "\x00" to address for unix-abstract - addr.Addr = "\x00" + addr.Addr + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. + addr.Addr = "@" + addr.Addr } cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) return &nopResolver{}, nil diff --git a/internal/serviceconfig/duration.go b/internal/serviceconfig/duration.go new file mode 100644 index 000000000000..11d82afcc7ec --- /dev/null +++ b/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. 
+func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. 
+ const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/internal/serviceconfig/duration_test.go b/internal/serviceconfig/duration_test.go new file mode 100644 index 000000000000..5696541aa870 --- /dev/null +++ b/internal/serviceconfig/duration_test.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "fmt" + "math" + "strings" + "testing" + "time" + + "google.golang.org/grpc/internal/grpcrand" +) + +// Tests both marshalling and unmarshalling of Durations. +func TestDuration_MarshalUnmarshal(t *testing.T) { + testCases := []struct { + json string + td time.Duration + unmarshalErr error + noMarshal bool + }{ + // Basic values. + {json: `"1s"`, td: time.Second}, + {json: `"-100.700s"`, td: -100*time.Second - 700*time.Millisecond}, + {json: `".050s"`, td: 50 * time.Millisecond, noMarshal: true}, + {json: `"-.001s"`, td: -1 * time.Millisecond, noMarshal: true}, + {json: `"-0.200s"`, td: -200 * time.Millisecond}, + // Positive near / out of bounds. + {json: `"9223372036s"`, td: 9223372036 * time.Second}, + {json: `"9223372037s"`, td: math.MaxInt64, noMarshal: true}, + {json: `"9223372036.854775807s"`, td: math.MaxInt64}, + {json: `"9223372036.854775808s"`, td: math.MaxInt64, noMarshal: true}, + {json: `"315576000000s"`, td: math.MaxInt64, noMarshal: true}, + {json: `"315576000001s"`, unmarshalErr: fmt.Errorf("out of range")}, + // Negative near / out of bounds. + {json: `"-9223372036s"`, td: -9223372036 * time.Second}, + {json: `"-9223372037s"`, td: math.MinInt64, noMarshal: true}, + {json: `"-9223372036.854775808s"`, td: math.MinInt64}, + {json: `"-9223372036.854775809s"`, td: math.MinInt64, noMarshal: true}, + {json: `"-315576000000s"`, td: math.MinInt64, noMarshal: true}, + {json: `"-315576000001s"`, unmarshalErr: fmt.Errorf("out of range")}, + // Parse errors. 
+ {json: `123s`, unmarshalErr: fmt.Errorf("invalid character")}, + {json: `"5m"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `"5.3.2s"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `"x.3s"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `"3.xs"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `"3.1234567890s"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `".s"`, unmarshalErr: fmt.Errorf("malformed duration")}, + {json: `"s"`, unmarshalErr: fmt.Errorf("malformed duration")}, + } + for _, tc := range testCases { + // Seed `got` with a random value to ensure we properly reset it in all + // non-error cases. + got := Duration(grpcrand.Uint64()) + err := got.UnmarshalJSON([]byte(tc.json)) + if (err == nil && time.Duration(got) != tc.td) || + (err != nil) != (tc.unmarshalErr != nil) || !strings.Contains(fmt.Sprint(err), fmt.Sprint(tc.unmarshalErr)) { + t.Errorf("UnmarshalJSON of %v = %v, %v; want %v, %v", tc.json, time.Duration(got), err, tc.td, tc.unmarshalErr) + } + + if tc.unmarshalErr == nil && !tc.noMarshal { + d := Duration(tc.td) + got, err := d.MarshalJSON() + if string(got) != tc.json || err != nil { + t.Errorf("MarshalJSON of %v = %v, %v; want %v, nil", d, string(got), err, tc.json) + } + } + } +} diff --git a/internal/serviceconfig/serviceconfig.go b/internal/serviceconfig/serviceconfig.go index bd4b8875f1a7..51e733e495a3 100644 --- a/internal/serviceconfig/serviceconfig.go +++ b/internal/serviceconfig/serviceconfig.go @@ -46,15 +46,31 @@ type BalancerConfig struct { type intermediateBalancerConfig []map[string]json.RawMessage +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. 
+ return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + // UnmarshalJSON implements the json.Unmarshaler interface. // // ServiceConfig contains a list of loadBalancingConfigs, each with a name and // config. This method iterates through that list in order, and stops at the // first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) @@ -62,6 +78,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { return err } + var names []string for i, lbcfg := range ir { if len(lbcfg) != 1 { return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) @@ -76,6 +93,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { for name, jsonCfg = range lbcfg { } + names = append(names, name) builder := balancer.Get(name) if builder == nil { // If the balancer is not registered, move on to the next config. @@ -104,7 +122,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { // return. This means we had a loadBalancingConfig slice but did not // encounter a registered policy. The config is considered invalid in this // case. 
- return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) } // MethodConfig defines the configuration recommended by the service providers for a diff --git a/internal/serviceconfig/serviceconfig_test.go b/internal/serviceconfig/serviceconfig_test.go index b8abaae027ef..3a725685db01 100644 --- a/internal/serviceconfig/serviceconfig_test.go +++ b/internal/serviceconfig/serviceconfig_test.go @@ -29,16 +29,18 @@ import ( ) type testBalancerConfigType struct { - externalserviceconfig.LoadBalancingConfig + externalserviceconfig.LoadBalancingConfig `json:"-"` + + Check bool `json:"check"` } -var testBalancerConfig = testBalancerConfigType{} +var testBalancerConfig = testBalancerConfigType{Check: true} const ( testBalancerBuilderName = "test-bb" testBalancerBuilderNotParserName = "test-bb-not-parser" - testBalancerConfigJSON = `{"test-balancer-config":"true"}` + testBalancerConfigJSON = `{"check":true}` ) type testBalancerBuilder struct { @@ -133,3 +135,48 @@ func TestBalancerConfigUnmarshalJSON(t *testing.T) { }) } } + +func TestBalancerConfigMarshalJSON(t *testing.T) { + tests := []struct { + name string + bc BalancerConfig + wantJSON string + }{ + { + name: "OK", + bc: BalancerConfig{ + Name: testBalancerBuilderName, + Config: testBalancerConfig, + }, + wantJSON: `[{"test-bb": {"check":true}}]`, + }, + { + name: "OK config is nil", + bc: BalancerConfig{ + Name: testBalancerBuilderNotParserName, + Config: nil, // nil should be marshalled to an empty config "{}". 
+ }, + wantJSON: `[{"test-bb-not-parser": {}}]`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := tt.bc.MarshalJSON() + if err != nil { + t.Fatalf("failed to marshal: %v", err) + } + + if str := string(b); str != tt.wantJSON { + t.Fatalf("got str %q, want %q", str, tt.wantJSON) + } + + var bc BalancerConfig + if err := bc.UnmarshalJSON(b); err != nil { + t.Errorf("failed to unmarshal: %v", err) + } + if !cmp.Equal(bc, tt.bc) { + t.Errorf("diff: %v", cmp.Diff(bc, tt.bc)) + } + }) + } +} diff --git a/internal/status/status.go b/internal/status/status.go index 710223b8ded0..b0ead4f54f82 100644 --- a/internal/status/status.go +++ b/internal/status/status.go @@ -97,7 +97,7 @@ func (s *Status) Err() error { if s.Code() == codes.OK { return nil } - return &Error{e: s.Proto()} + return &Error{s: s} } // WithDetails returns a new status with the provided details messages appended to the status. @@ -136,19 +136,23 @@ func (s *Status) Details() []interface{} { return details } +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + // Error wraps a pointer of a status proto. It implements error and Status, // and a nil *Error should never be returned by this package. type Error struct { - e *spb.Status + s *Status } func (e *Error) Error() string { - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) + return e.s.String() } // GRPCStatus returns the Status represented by se. func (e *Error) GRPCStatus() *Status { - return FromProto(e.e) + return e.s } // Is implements future error.Is functionality. @@ -158,5 +162,15 @@ func (e *Error) Is(target error) bool { if !ok { return false } - return proto.Equal(e.e, tse.e) + return proto.Equal(e.s.s, tse.s.s) +} + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. 
+func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false } diff --git a/internal/stubserver/stubserver.go b/internal/stubserver/stubserver.go index c97010dfe9a6..cfc4b0f2e82d 100644 --- a/internal/stubserver/stubserver.go +++ b/internal/stubserver/stubserver.go @@ -17,37 +17,40 @@ */ // Package stubserver is a stubbable implementation of -// google.golang.org/grpc/test/grpc_testing for testing purposes. +// google.golang.org/grpc/interop/grpc_testing for testing purposes. package stubserver import ( "context" "fmt" "net" + "testing" "time" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/serviceconfig" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // StubServer is a server that is easy to customize within individual test // cases. type StubServer struct { // Guarantees we satisfy this interface; panics if unimplemented methods are called. - testpb.TestServiceServer + testgrpc.TestServiceServer // Customizable implementations of server handlers. EmptyCallF func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) UnaryCallF func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) - FullDuplexCallF func(stream testpb.TestService_FullDuplexCallServer) error + FullDuplexCallF func(stream testgrpc.TestService_FullDuplexCallServer) error // A client connected to this service the test may use. Created in Start(). 
- Client testpb.TestServiceClient + Client testgrpc.TestServiceClient CC *grpc.ClientConn S *grpc.Server @@ -57,6 +60,10 @@ type StubServer struct { Address string Target string + // Custom listener to use for serving. If unspecified, a new listener is + // created on a local port. + Listener net.Listener + cleanups []func() // Lambdas executed in Stop(); populated by Start(). // Set automatically if Target == "" @@ -74,12 +81,37 @@ func (ss *StubServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) ( } // FullDuplexCall is the handler for testpb.FullDuplexCall -func (ss *StubServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (ss *StubServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { return ss.FullDuplexCallF(stream) } // Start starts the server and creates a client connected to it. func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) error { + if err := ss.StartServer(sopts...); err != nil { + return err + } + if err := ss.StartClient(dopts...); err != nil { + ss.Stop() + return err + } + return nil +} + +type registerServiceServerOption struct { + grpc.EmptyServerOption + f func(*grpc.Server) +} + +// RegisterServiceServerOption returns a ServerOption that will run f() in +// Start or StartServer with the grpc.Server created before serving. This +// allows other services to be registered on the test server (e.g. ORCA, +// health, or reflection). +func RegisterServiceServerOption(f func(*grpc.Server)) grpc.ServerOption { + return ®isterServiceServerOption{f: f} +} + +// StartServer only starts the server. It does not create a client to it. 
+func (ss *StubServer) StartServer(sopts ...grpc.ServerOption) error { if ss.Network == "" { ss.Network = "tcp" } @@ -90,20 +122,36 @@ func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) ss.R = manual.NewBuilderWithScheme("whatever") } - lis, err := net.Listen(ss.Network, ss.Address) - if err != nil { - return fmt.Errorf("net.Listen(%q, %q) = %v", ss.Network, ss.Address, err) + lis := ss.Listener + if lis == nil { + var err error + lis, err = net.Listen(ss.Network, ss.Address) + if err != nil { + return fmt.Errorf("net.Listen(%q, %q) = %v", ss.Network, ss.Address, err) + } } ss.Address = lis.Addr().String() ss.cleanups = append(ss.cleanups, func() { lis.Close() }) s := grpc.NewServer(sopts...) - testpb.RegisterTestServiceServer(s, ss) + for _, so := range sopts { + switch x := so.(type) { + case *registerServiceServerOption: + x.f(s) + } + } + + testgrpc.RegisterTestServiceServer(s, ss) go s.Serve(lis) ss.cleanups = append(ss.cleanups, s.Stop) ss.S = s + return nil +} - opts := append([]grpc.DialOption{grpc.WithInsecure()}, dopts...) +// StartClient creates a client connected to this service that the test may use. +// The newly created client will be available in the Client field of StubServer. +func (ss *StubServer) StartClient(dopts ...grpc.DialOption) error { + opts := append([]grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}, dopts...) 
if ss.R != nil { ss.Target = ss.R.Scheme() + ":///" + ss.Address opts = append(opts, grpc.WithResolvers(ss.R)) @@ -118,12 +166,13 @@ func (ss *StubServer) Start(sopts []grpc.ServerOption, dopts ...grpc.DialOption) ss.R.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ss.Address}}}) } if err := waitForReady(cc); err != nil { + cc.Close() return err } ss.cleanups = append(ss.cleanups, func() { cc.Close() }) - ss.Client = testpb.NewTestServiceClient(cc) + ss.Client = testgrpc.NewTestServiceClient(cc) return nil } @@ -163,3 +212,21 @@ func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { } return g } + +// StartTestService spins up a stub server exposing the TestService on a local +// port. If the passed in server is nil, a stub server that implements only the +// EmptyCall and UnaryCall RPCs is started. +func StartTestService(t *testing.T, server *StubServer) *StubServer { + if server == nil { + server = &StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + UnaryCallF: func(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + } + } + server.StartServer() + + t.Logf("Started test service backend at %q", server.Address) + return server +} diff --git a/internal/syscall/syscall_linux.go b/internal/syscall/syscall_linux.go index 4b2964f2a1e3..b3a72276dee4 100644 --- a/internal/syscall/syscall_linux.go +++ b/internal/syscall/syscall_linux.go @@ -1,5 +1,3 @@ -// +build !appengine - /* * * Copyright 2018 gRPC authors. 
diff --git a/internal/syscall/syscall_nonlinux.go b/internal/syscall/syscall_nonlinux.go index 7913ef1dbfb5..999f52cd75bd 100644 --- a/internal/syscall/syscall_nonlinux.go +++ b/internal/syscall/syscall_nonlinux.go @@ -1,4 +1,5 @@ -// +build !linux appengine +//go:build !linux +// +build !linux /* * @@ -35,41 +36,41 @@ var logger = grpclog.Component("core") func log() { once.Do(func() { - logger.Info("CPU time info is unavailable on non-linux or appengine environment.") + logger.Info("CPU time info is unavailable on non-linux environments.") }) } -// GetCPUTime returns the how much CPU time has passed since the start of this process. -// It always returns 0 under non-linux or appengine environment. +// GetCPUTime returns the how much CPU time has passed since the start of this +// process. It always returns 0 under non-linux environments. func GetCPUTime() int64 { log() return 0 } -// Rusage is an empty struct under non-linux or appengine environment. +// Rusage is an empty struct under non-linux environments. type Rusage struct{} -// GetRusage is a no-op function under non-linux or appengine environment. +// GetRusage is a no-op function under non-linux environments. func GetRusage() *Rusage { log() return nil } // CPUTimeDiff returns the differences of user CPU time and system CPU time used -// between two Rusage structs. It a no-op function for non-linux or appengine environment. +// between two Rusage structs. It a no-op function for non-linux environments. func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { log() return 0, 0 } -// SetTCPUserTimeout is a no-op function under non-linux or appengine environments +// SetTCPUserTimeout is a no-op function under non-linux environments. 
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { log() return nil } -// GetTCPUserTimeout is a no-op function under non-linux or appengine environments -// a negative return value indicates the operation is not supported +// GetTCPUserTimeout is a no-op function under non-linux environments. +// A negative return value indicates the operation is not supported func GetTCPUserTimeout(conn net.Conn) (int, error) { log() return -1, nil diff --git a/xds/internal/testutils/balancer.go b/internal/testutils/balancer.go similarity index 60% rename from xds/internal/testutils/balancer.go rename to internal/testutils/balancer.go index dab84a84e072..8927823d09da 100644 --- a/xds/internal/testutils/balancer.go +++ b/internal/testutils/balancer.go @@ -16,7 +16,6 @@ * */ -// Package testutils provides utility types, for use in xds tests. package testutils import ( @@ -46,21 +45,33 @@ var TestSubConns []*TestSubConn func init() { for i := 0; i < TestSubConnsCount; i++ { TestSubConns = append(TestSubConns, &TestSubConn{ - id: fmt.Sprintf("sc%d", i), + id: fmt.Sprintf("sc%d", i), + ConnectCh: make(chan struct{}, 1), }) } } // TestSubConn implements the SubConn interface, to be used in tests. type TestSubConn struct { - id string + id string + ConnectCh chan struct{} } // UpdateAddresses is a no-op. func (tsc *TestSubConn) UpdateAddresses([]resolver.Address) {} // Connect is a no-op. -func (tsc *TestSubConn) Connect() {} +func (tsc *TestSubConn) Connect() { + select { + case tsc.ConnectCh <- struct{}{}: + default: + } +} + +// GetOrBuildProducer is a no-op. +func (tsc *TestSubConn) GetOrBuildProducer(balancer.ProducerBuilder) (balancer.Producer, func()) { + return nil, nil +} // String implements stringer to print human friendly error message. func (tsc *TestSubConn) String() string { @@ -76,8 +87,9 @@ type TestClientConn struct { RemoveSubConnCh chan balancer.SubConn // the last 10 subconn removed. 
UpdateAddressesAddrsCh chan []resolver.Address // last updated address via UpdateAddresses(). - NewPickerCh chan balancer.Picker // the last picker updated. - NewStateCh chan connectivity.State // the last state. + NewPickerCh chan balancer.Picker // the last picker updated. + NewStateCh chan connectivity.State // the last state. + ResolveNowCh chan resolver.ResolveNowOptions // the last ResolveNow(). subConnIdx int } @@ -92,8 +104,9 @@ func NewTestClientConn(t *testing.T) *TestClientConn { RemoveSubConnCh: make(chan balancer.SubConn, 10), UpdateAddressesAddrsCh: make(chan []resolver.Address, 1), - NewPickerCh: make(chan balancer.Picker, 1), - NewStateCh: make(chan connectivity.State, 1), + NewPickerCh: make(chan balancer.Picker, 1), + NewStateCh: make(chan connectivity.State, 1), + ResolveNowCh: make(chan resolver.ResolveNowOptions, 1), } } @@ -151,8 +164,12 @@ func (tcc *TestClientConn) UpdateState(bs balancer.State) { } // ResolveNow panics. -func (tcc *TestClientConn) ResolveNow(resolver.ResolveNowOptions) { - panic("not implemented") +func (tcc *TestClientConn) ResolveNow(o resolver.ResolveNowOptions) { + select { + case <-tcc.ResolveNowCh: + default: + } + tcc.ResolveNowCh <- o } // Target panics. @@ -175,6 +192,102 @@ func (tcc *TestClientConn) WaitForErrPicker(ctx context.Context) error { return nil } +// WaitForPickerWithErr waits until an error picker is pushed to this +// ClientConn with the error matching the wanted error. Returns an error if +// the provided context expires, including the last received picker error (if +// any). 
+func (tcc *TestClientConn) WaitForPickerWithErr(ctx context.Context, want error) error { + lastErr := errors.New("received no picker") + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout when waiting for an error picker with %v; last picker error: %v", want, lastErr) + case picker := <-tcc.NewPickerCh: + if _, lastErr = picker.Pick(balancer.PickInfo{}); lastErr != nil && lastErr.Error() == want.Error() { + return nil + } + } + } +} + +// WaitForConnectivityState waits until the state pushed to this ClientConn +// matches the wanted state. Returns an error if the provided context expires, +// including the last received state (if any). +func (tcc *TestClientConn) WaitForConnectivityState(ctx context.Context, want connectivity.State) error { + var lastState connectivity.State = -1 + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout when waiting for state to be %s; last state: %s", want, lastState) + case s := <-tcc.NewStateCh: + if s == want { + return nil + } + lastState = s + } + } +} + +// WaitForRoundRobinPicker waits for a picker that passes IsRoundRobin. Also +// drains the matching state channel and requires it to be READY (if an entry +// is pending) to be considered. Returns an error if the provided context +// expires, including the last received error from IsRoundRobin or the picker +// (if any). 
+func (tcc *TestClientConn) WaitForRoundRobinPicker(ctx context.Context, want ...balancer.SubConn) error { + lastErr := errors.New("received no picker") + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout when waiting for round robin picker with %v; last error: %v", want, lastErr) + case p := <-tcc.NewPickerCh: + s := connectivity.Ready + select { + case s = <-tcc.NewStateCh: + default: + } + if s != connectivity.Ready { + lastErr = fmt.Errorf("received state %v instead of ready", s) + break + } + var pickerErr error + if err := IsRoundRobin(want, func() balancer.SubConn { + sc, err := p.Pick(balancer.PickInfo{}) + if err != nil { + pickerErr = err + } else if sc.Done != nil { + sc.Done(balancer.DoneInfo{}) + } + return sc.SubConn + }); pickerErr != nil { + lastErr = pickerErr + continue + } else if err != nil { + lastErr = err + continue + } + return nil + } + } +} + +// WaitForPicker waits for a picker that results in f returning nil. If the +// context expires, returns the last error returned by f (if any). +func (tcc *TestClientConn) WaitForPicker(ctx context.Context, f func(balancer.Picker) error) error { + lastErr := errors.New("received no picker") + for { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout when waiting for picker; last error: %v", lastErr) + case p := <-tcc.NewPickerCh: + if err := f(p); err != nil { + lastErr = err + continue + } + return nil + } + } +} + // IsRoundRobin checks whether f's return value is roundrobin of elements from // want. But it doesn't check for the order. Note that want can contain // duplicate items, which makes it weight-round-robin. @@ -182,16 +295,16 @@ func (tcc *TestClientConn) WaitForErrPicker(ctx context.Context) error { // Step 1. the return values of f should form a permutation of all elements in // want, but not necessary in the same order. E.g. 
if want is {a,a,b}, the check // fails if f returns: -// - {a,a,a}: third a is returned before b -// - {a,b,b}: second b is returned before the second a +// - {a,a,a}: third a is returned before b +// - {a,b,b}: second b is returned before the second a // // If error is found in this step, the returned error contains only the first // iteration until where it goes wrong. // // Step 2. the return values of f should be repetitions of the same permutation. // E.g. if want is {a,a,b}, the check failes if f returns: -// - {a,b,a,b,a,a}: though it satisfies step 1, the second iteration is not -// repeating the first iteration. +// - {a,b,a,b,a,a}: though it satisfies step 1, the second iteration is not +// repeating the first iteration. // // If error is found in this step, the returned error contains the first // iteration + the second iteration until where it goes wrong. @@ -231,16 +344,14 @@ func IsRoundRobin(want []balancer.SubConn, f func() balancer.SubConn) error { return nil } -// testClosure is a test util for TestIsRoundRobin. -type testClosure struct { - r []balancer.SubConn - i int -} - -func (tc *testClosure) next() balancer.SubConn { - ret := tc.r[tc.i] - tc.i = (tc.i + 1) % len(tc.r) - return ret +// SubConnFromPicker returns a function which returns a SubConn by calling the +// Pick() method of the provided picker. There is no caching of SubConns here. +// Every invocation of the returned function results in a new pick. +func SubConnFromPicker(p balancer.Picker) func() balancer.SubConn { + return func() balancer.SubConn { + scst, _ := p.Pick(balancer.PickInfo{}) + return scst.SubConn + } } // ErrTestConstPicker is error returned by test const picker. diff --git a/internal/testutils/fakegrpclb/server.go b/internal/testutils/fakegrpclb/server.go new file mode 100644 index 000000000000..82be2c1af1a4 --- /dev/null +++ b/internal/testutils/fakegrpclb/server.go @@ -0,0 +1,249 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package fakegrpclb provides a fake implementation of the grpclb server. +package fakegrpclb + +import ( + "errors" + "fmt" + "io" + "net" + "strconv" + "sync" + "time" + + "google.golang.org/grpc" + lbgrpc "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/status" +) + +var logger = grpclog.Component("fake_grpclb") + +// ServerParams wraps options passed while creating a Server. +type ServerParams struct { + ListenPort int // Listening port for the balancer server. + ServerOptions []grpc.ServerOption // gRPC options for the balancer server. + + LoadBalancedServiceName string // Service name being load balanced for. + LoadBalancedServicePort int // Service port being load balanced for. + BackendAddresses []string // Service backends to balance load across. + ShortStream bool // End balancer stream after sending server list. +} + +// Server is a fake implementation of the grpclb LoadBalancer service. It does +// not support stats reporting from clients, and always sends back a static list +// of backends to the client to balance load across. +// +// It is safe for concurrent access. 
+type Server struct { + lbgrpc.UnimplementedLoadBalancerServer + + // Options copied over from ServerParams passed to NewServer. + sOpts []grpc.ServerOption // gRPC server options. + serviceName string // Service name being load balanced for. + servicePort int // Service port being load balanced for. + shortStream bool // End balancer stream after sending server list. + + // Values initialized using ServerParams passed to NewServer. + backends []*lbpb.Server // Service backends to balance load across. + lis net.Listener // Listener for grpc connections to the LoadBalancer service. + + // mu guards access to below fields. + mu sync.Mutex + grpcServer *grpc.Server // Underlying grpc server. + address string // Actual listening address. + + stopped chan struct{} // Closed when Stop() is called. +} + +// NewServer creates a new Server with passed in params. Returns a non-nil error +// if the params are invalid. +func NewServer(params ServerParams) (*Server, error) { + var servers []*lbpb.Server + for _, addr := range params.BackendAddresses { + ipStr, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, fmt.Errorf("failed to parse list of backend address %q: %v", addr, err) + } + ip := net.ParseIP(ipStr) + if ip == nil { + return nil, fmt.Errorf("failed to parse ip: %q", ipStr) + } + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, fmt.Errorf("failed to convert port %q to int", portStr) + } + logger.Infof("Adding backend ip: %q, port: %d to server list", ip.String(), port) + servers = append(servers, &lbpb.Server{ + IpAddress: ip, + Port: int32(port), + }) + } + + lis, err := net.Listen("tcp", "localhost:"+strconv.Itoa(params.ListenPort)) + if err != nil { + return nil, fmt.Errorf("failed to listen on port %q: %v", params.ListenPort, err) + } + + return &Server{ + sOpts: params.ServerOptions, + serviceName: params.LoadBalancedServiceName, + servicePort: params.LoadBalancedServicePort, + shortStream: params.ShortStream, + backends: 
servers, + lis: lis, + address: lis.Addr().String(), + stopped: make(chan struct{}), + }, nil +} + +// Serve starts serving the LoadBalancer service on a gRPC server. +// +// It returns early with a non-nil error if it is unable to start serving. +// Otherwise, it blocks until Stop() is called, at which point it returns the +// error returned by the underlying grpc.Server's Serve() method. +func (s *Server) Serve() error { + s.mu.Lock() + if s.grpcServer != nil { + s.mu.Unlock() + return errors.New("Serve() called multiple times") + } + + server := grpc.NewServer(s.sOpts...) + s.grpcServer = server + s.mu.Unlock() + + logger.Infof("Begin listening on %s", s.lis.Addr().String()) + lbgrpc.RegisterLoadBalancerServer(server, s) + return server.Serve(s.lis) // This call will block. +} + +// Stop stops serving the LoadBalancer service and unblocks the preceding call +// to Serve(). +func (s *Server) Stop() { + defer close(s.stopped) + s.mu.Lock() + if s.grpcServer != nil { + s.grpcServer.Stop() + s.grpcServer = nil + } + s.mu.Unlock() +} + +// Address returns the host:port on which the LoadBalancer service is serving. +func (s *Server) Address() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.address +} + +// BalanceLoad provides a fake implementation of the LoadBalancer service. +func (s *Server) BalanceLoad(stream lbgrpc.LoadBalancer_BalanceLoadServer) error { + logger.Info("New BalancerLoad stream started") + + req, err := stream.Recv() + if err == io.EOF { + logger.Warning("Received EOF when reading from the stream") + return nil + } + if err != nil { + logger.Warning("Failed to read LoadBalanceRequest from stream: %v", err) + return err + } + logger.Infof("Received LoadBalancerRequest:\n%s", pretty.ToJSON(req)) + + // Initial request contains the service being load balanced for. 
+ initialReq := req.GetInitialRequest() + if initialReq == nil { + logger.Info("First message on the stream does not contain an InitialLoadBalanceRequest") + return status.Error(codes.Unknown, "First request not an InitialLoadBalanceRequest") + } + + // Basic validation of the service name and port from the incoming request. + // + // Clients targeting service:port can sometimes include the ":port" suffix in + // their requested names; handle this case. + serviceName, port, err := net.SplitHostPort(initialReq.Name) + if err != nil { + // Requested name did not contain a port. So, use the name as is. + serviceName = initialReq.Name + } else { + p, err := strconv.Atoi(port) + if err != nil { + logger.Info("Failed to parse requested service port %q to integer", port) + return status.Error(codes.Unknown, "Bad requested service port number") + } + if p != s.servicePort { + logger.Info("Requested service port number %q does not match expected", port, s.servicePort) + return status.Error(codes.Unknown, "Bad requested service port number") + } + } + if serviceName != s.serviceName { + logger.Info("Requested service name %q does not match expected %q", serviceName, s.serviceName) + return status.Error(codes.NotFound, "Bad requested service name") + } + + // Empty initial response disables stats reporting from the client. Stats + // reporting from the client is used to determine backend load and is not + // required for the purposes of this fake. 
+ initResp := &lbpb.LoadBalanceResponse{ + LoadBalanceResponseType: &lbpb.LoadBalanceResponse_InitialResponse{ + InitialResponse: &lbpb.InitialLoadBalanceResponse{}, + }, + } + if err := stream.Send(initResp); err != nil { + logger.Warningf("Failed to send InitialLoadBalanceResponse on the stream: %v", err) + return err + } + + resp := &lbpb.LoadBalanceResponse{ + LoadBalanceResponseType: &lbpb.LoadBalanceResponse_ServerList{ + ServerList: &lbpb.ServerList{Servers: s.backends}, + }, + } + logger.Infof("Sending response with server list: %s", pretty.ToJSON(resp)) + if err := stream.Send(resp); err != nil { + logger.Warningf("Failed to send InitialLoadBalanceResponse on the stream: %v", err) + return err + } + + if s.shortStream { + logger.Info("Ending stream early as the short stream option was set") + return nil + } + + for { + select { + case <-stream.Context().Done(): + return nil + case <-s.stopped: + return nil + case <-time.After(10 * time.Second): + logger.Infof("Sending response with server list: %s", pretty.ToJSON(resp)) + if err := stream.Send(resp); err != nil { + logger.Warningf("Failed to send InitialLoadBalanceResponse on the stream: %v", err) + return err + } + } + } +} diff --git a/xds/internal/testutils/local_listener.go b/internal/testutils/local_listener.go similarity index 100% rename from xds/internal/testutils/local_listener.go rename to internal/testutils/local_listener.go diff --git a/internal/testutils/marshal_any.go b/internal/testutils/marshal_any.go new file mode 100644 index 000000000000..9ddef6de15d6 --- /dev/null +++ b/internal/testutils/marshal_any.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package testutils + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "google.golang.org/protobuf/types/known/anypb" +) + +// MarshalAny is a convenience function to marshal protobuf messages into any +// protos. It will panic if the marshaling fails. +func MarshalAny(m proto.Message) *anypb.Any { + a, err := ptypes.MarshalAny(m) + if err != nil { + panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", m, err)) + } + return a +} diff --git a/internal/testutils/parse_port.go b/internal/testutils/parse_port.go new file mode 100644 index 000000000000..c633af06a7db --- /dev/null +++ b/internal/testutils/parse_port.go @@ -0,0 +1,39 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package testutils + +import ( + "net" + "strconv" + "testing" +) + +// ParsePort returns the port from the given address string, as a unit32. 
+func ParsePort(t *testing.T, addr string) uint32 { + t.Helper() + + _, p, err := net.SplitHostPort(addr) + if err != nil { + t.Fatalf("Invalid serving address: %v", err) + } + port, err := strconv.ParseUint(p, 10, 32) + if err != nil { + t.Fatalf("Invalid serving port: %v", err) + } + return uint32(port) +} diff --git a/internal/testutils/parse_url.go b/internal/testutils/parse_url.go new file mode 100644 index 000000000000..ff276e4d0c38 --- /dev/null +++ b/internal/testutils/parse_url.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package testutils + +import ( + "fmt" + "net/url" +) + +// MustParseURL attempts to parse the provided target using url.Parse() +// and panics if parsing fails. +func MustParseURL(target string) *url.URL { + u, err := url.Parse(target) + if err != nil { + panic(fmt.Sprintf("Error parsing target(%s): %v", target, err)) + } + return u +} diff --git a/internal/testutils/pickfirst/pickfirst.go b/internal/testutils/pickfirst/pickfirst.go new file mode 100644 index 000000000000..aa90ffc531f4 --- /dev/null +++ b/internal/testutils/pickfirst/pickfirst.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirst contains helper functions to check for pickfirst load +// balancing of RPCs in tests. +package pickfirst + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// CheckRPCsToBackend makes a bunch of RPCs on the given ClientConn and verifies +// if the RPCs are routed to a peer matching wantAddr. +// +// Returns a non-nil error if context deadline expires before all RPCs begin to +// be routed to the peer matching wantAddr, or if the backend returns RPC errors. +func CheckRPCsToBackend(ctx context.Context, cc *grpc.ClientConn, wantAddr resolver.Address) error { + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + // Make sure that 20 RPCs in a row reach the expected backend. Some + // tests switch from round_robin back to pick_first and call this + // function. None of our tests spin up more than 10 backends. So, + // waiting for 20 RPCs to reach a single backend would a decent + // indicator of having switched to pick_first. + count := 0 + for { + time.Sleep(time.Millisecond) + if ctx.Err() != nil { + return fmt.Errorf("timeout waiting for RPC to be routed to %s", wantAddr.Addr) + } + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + // Some tests remove backends and check if pick_first is happening across + // the remaining backends. 
In such cases, RPCs can initially fail on the + // connection using the removed backend. Just keep retrying and eventually + // the connection using the removed backend will shutdown and will be + // removed. + continue + } + if peer.Addr.String() != wantAddr.Addr { + count = 0 + continue + } + count++ + if count > 20 { + break + } + } + // Make sure subsequent RPCs are all routed to the same backend. + for i := 0; i < 10; i++ { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + return fmt.Errorf("EmptyCall() = %v, want ", err) + } + if gotAddr := peer.Addr.String(); gotAddr != wantAddr.Addr { + return fmt.Errorf("rpc sent to peer %q, want peer %q", gotAddr, wantAddr) + } + } + return nil +} diff --git a/internal/testutils/restartable_listener.go b/internal/testutils/restartable_listener.go new file mode 100644 index 000000000000..efe4019a08c2 --- /dev/null +++ b/internal/testutils/restartable_listener.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package testutils + +import ( + "net" + "sync" +) + +type tempError struct{} + +func (*tempError) Error() string { + return "restartable listener temporary error" +} +func (*tempError) Temporary() bool { + return true +} + +// RestartableListener wraps a net.Listener and supports stopping and restarting +// the latter. 
+type RestartableListener struct { + lis net.Listener + + mu sync.Mutex + stopped bool + conns []net.Conn +} + +// NewRestartableListener returns a new RestartableListener wrapping l. +func NewRestartableListener(l net.Listener) *RestartableListener { + return &RestartableListener{lis: l} +} + +// Accept waits for and returns the next connection to the listener. +// +// If the listener is currently not accepting new connections, because `Stop` +// was called on it, the connection is immediately closed after accepting +// without any bytes being sent on it. +func (l *RestartableListener) Accept() (net.Conn, error) { + conn, err := l.lis.Accept() + if err != nil { + return nil, err + } + + l.mu.Lock() + defer l.mu.Unlock() + if l.stopped { + conn.Close() + return nil, &tempError{} + } + l.conns = append(l.conns, conn) + return conn, nil +} + +// Close closes the listener. +func (l *RestartableListener) Close() error { + return l.lis.Close() +} + +// Addr returns the listener's network address. +func (l *RestartableListener) Addr() net.Addr { + return l.lis.Addr() +} + +// Stop closes existing connections on the listener and prevents new connections +// from being accepted. +func (l *RestartableListener) Stop() { + l.mu.Lock() + l.stopped = true + for _, conn := range l.conns { + conn.Close() + } + l.conns = nil + l.mu.Unlock() +} + +// Restart gets a previously stopped listener to start accepting connections. +func (l *RestartableListener) Restart() { + l.mu.Lock() + l.stopped = false + l.mu.Unlock() +} diff --git a/internal/testutils/rls/fake_rls_server.go b/internal/testutils/rls/fake_rls_server.go new file mode 100644 index 000000000000..e64c9de3ae7f --- /dev/null +++ b/internal/testutils/rls/fake_rls_server.go @@ -0,0 +1,134 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls contains utilities for RouteLookupService e2e tests. +package rls + +import ( + "context" + "net" + "sync" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + rlsgrpc "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/status" +) + +// RouteLookupResponse wraps an RLS response and the associated error to be sent +// to a client when the RouteLookup RPC is invoked. +type RouteLookupResponse struct { + Resp *rlspb.RouteLookupResponse + Err error +} + +// SetupFakeRLSServer starts and returns a fake RouteLookupService server +// listening on the given listener or on a random local port. Also returns a +// channel for tests to get notified whenever the RouteLookup RPC is invoked on +// the fake server. +// +// This function sets up the fake server to respond with an empty response for +// the RouteLookup RPCs. Tests can override this by calling the +// SetResponseCallback() method on the returned fake server. +func SetupFakeRLSServer(t *testing.T, lis net.Listener, opts ...grpc.ServerOption) (*FakeRouteLookupServer, chan struct{}) { + s, cancel := StartFakeRouteLookupServer(t, lis, opts...) 
+ t.Logf("Started fake RLS server at %q", s.Address) + + ch := make(chan struct{}, 1) + s.SetRequestCallback(func(request *rlspb.RouteLookupRequest) { + select { + case ch <- struct{}{}: + default: + } + }) + t.Cleanup(cancel) + return s, ch +} + +// FakeRouteLookupServer is a fake implementation of the RouteLookupService. +// +// It is safe for concurrent use. +type FakeRouteLookupServer struct { + rlsgrpc.UnimplementedRouteLookupServiceServer + Address string + + mu sync.Mutex + respCb func(context.Context, *rlspb.RouteLookupRequest) *RouteLookupResponse + reqCb func(*rlspb.RouteLookupRequest) +} + +// StartFakeRouteLookupServer starts a fake RLS server listening for requests on +// lis. If lis is nil, it creates a new listener on a random local port. The +// returned cancel function should be invoked by the caller upon completion of +// the test. +func StartFakeRouteLookupServer(t *testing.T, lis net.Listener, opts ...grpc.ServerOption) (*FakeRouteLookupServer, func()) { + t.Helper() + + if lis == nil { + var err error + lis, err = testutils.LocalTCPListener() + if err != nil { + t.Fatalf("net.Listen() failed: %v", err) + } + } + + s := &FakeRouteLookupServer{Address: lis.Addr().String()} + server := grpc.NewServer(opts...) + rlsgrpc.RegisterRouteLookupServiceServer(server, s) + go server.Serve(lis) + return s, func() { server.Stop() } +} + +// RouteLookup implements the RouteLookupService. +func (s *FakeRouteLookupServer) RouteLookup(ctx context.Context, req *rlspb.RouteLookupRequest) (*rlspb.RouteLookupResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.reqCb != nil { + s.reqCb(req) + } + if err := ctx.Err(); err != nil { + return nil, status.Error(codes.DeadlineExceeded, err.Error()) + } + if s.respCb == nil { + return &rlspb.RouteLookupResponse{}, nil + } + resp := s.respCb(ctx, req) + return resp.Resp, resp.Err +} + +// SetResponseCallback sets a callback to be invoked on every RLS request. 
If +// this callback is set, the response returned by the fake server depends on the +// value returned by the callback. If this callback is not set, the fake server +// responds with an empty response. +func (s *FakeRouteLookupServer) SetResponseCallback(f func(context.Context, *rlspb.RouteLookupRequest) *RouteLookupResponse) { + s.mu.Lock() + s.respCb = f + s.mu.Unlock() +} + +// SetRequestCallback sets a callback to be invoked on every RLS request. The +// callback is given the incoming request, and tests can use this to verify that +// the request matches its expectations. +func (s *FakeRouteLookupServer) SetRequestCallback(f func(*rlspb.RouteLookupRequest)) { + s.mu.Lock() + s.reqCb = f + s.mu.Unlock() +} diff --git a/internal/testutils/roundrobin/roundrobin.go b/internal/testutils/roundrobin/roundrobin.go new file mode 100644 index 000000000000..ba595735364d --- /dev/null +++ b/internal/testutils/roundrobin/roundrobin.go @@ -0,0 +1,223 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin contains helper functions to check for roundrobin and +// weighted-roundrobin load balancing of RPCs in tests. 
+package roundrobin + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +var logger = grpclog.Component("testutils-roundrobin") + +// waitForTrafficToReachBackends repeatedly makes RPCs using the provided +// TestServiceClient until RPCs reach all backends specified in addrs, or the +// context expires, in which case a non-nil error is returned. +func waitForTrafficToReachBackends(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error { + // Make sure connections to all backends are up. We need to do this two + // times (to be sure that round_robin has kicked in) because the channel + // could have been configured with a different LB policy before the switch + // to round_robin. And the previous LB policy could be sharing backends with + // round_robin, and therefore in the first iteration of this loop, RPCs + // could land on backends owned by the previous LB policy. + for j := 0; j < 2; j++ { + for i := 0; i < len(addrs); i++ { + for { + time.Sleep(time.Millisecond) + if ctx.Err() != nil { + return fmt.Errorf("timeout waiting for connection to %q to be up", addrs[i].Addr) + } + var peer peer.Peer + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + // Some tests remove backends and check if round robin is + // happening across the remaining backends. In such cases, + // RPCs can initially fail on the connection using the + // removed backend. Just keep retrying and eventually the + // connection using the removed backend will shutdown and + // will be removed. 
+ continue + } + if peer.Addr.String() == addrs[i].Addr { + break + } + } + } + } + return nil +} + +// CheckRoundRobinRPCs verifies that EmptyCall RPCs on the given ClientConn, +// connected to a server exposing the test.grpc_testing.TestService, are +// roundrobined across the given backend addresses. +// +// Returns a non-nil error if context deadline expires before RPCs start to get +// roundrobined across the given backends. +func CheckRoundRobinRPCs(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error { + if err := waitForTrafficToReachBackends(ctx, client, addrs); err != nil { + return err + } + + // At this point, RPCs are getting successfully executed at the backends + // that we care about. To support duplicate addresses (in addrs) and + // backends being removed from the list of addresses passed to the + // roundrobin LB, we do the following: + // 1. Determine the count of RPCs that we expect each of our backends to + // receive per iteration. + // 2. Wait until the same pattern repeats a few times, or the context + // deadline expires. + wantAddrCount := make(map[string]int) + for _, addr := range addrs { + wantAddrCount[addr.Addr]++ + } + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + // Perform 3 more iterations. + var iterations [][]string + for i := 0; i < 3; i++ { + iteration := make([]string, len(addrs)) + for c := 0; c < len(addrs); c++ { + var peer peer.Peer + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + return fmt.Errorf("EmptyCall() = %v, want ", err) + } + iteration[c] = peer.Addr.String() + } + iterations = append(iterations, iteration) + } + // Ensure the first iteration contains all addresses in addrs. 
+ gotAddrCount := make(map[string]int) + for _, addr := range iterations[0] { + gotAddrCount[addr]++ + } + if diff := cmp.Diff(gotAddrCount, wantAddrCount); diff != "" { + logger.Infof("non-roundrobin, got address count in one iteration: %v, want: %v, Diff: %s", gotAddrCount, wantAddrCount, diff) + continue + } + // Ensure all three iterations contain the same addresses. + if !cmp.Equal(iterations[0], iterations[1]) || !cmp.Equal(iterations[0], iterations[2]) { + logger.Infof("non-roundrobin, first iter: %v, second iter: %v, third iter: %v", iterations[0], iterations[1], iterations[2]) + continue + } + return nil + } + return fmt.Errorf("timeout when waiting for roundrobin distribution of RPCs across addresses: %v", addrs) +} + +// CheckWeightedRoundRobinRPCs verifies that EmptyCall RPCs on the given +// ClientConn, connected to a server exposing the test.grpc_testing.TestService, +// are weighted roundrobined (with randomness) across the given backend +// addresses. +// +// Returns a non-nil error if context deadline expires before RPCs start to get +// roundrobined across the given backends. +func CheckWeightedRoundRobinRPCs(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error { + if err := waitForTrafficToReachBackends(ctx, client, addrs); err != nil { + return err + } + + // At this point, RPCs are getting successfully executed at the backends + // that we care about. To take the randomness of the WRR into account, we + // look for approximate distribution instead of exact. + wantAddrCount := make(map[string]int) + for _, addr := range addrs { + wantAddrCount[addr.Addr]++ + } + wantRatio := make(map[string]float64) + for addr, count := range wantAddrCount { + wantRatio[addr] = float64(count) / float64(len(addrs)) + } + + // There is a small possibility that RPCs are reaching backends that we + // don't expect them to reach here. The can happen because: + // - at time T0, the list of backends [A, B, C, D]. 
+ // - at time T1, the test updates the list of backends to [A, B, C], and + // immediately starts attempting to check the distribution of RPCs to the + // new backends. + // - there is no way for the test to wait for a new picker to be pushed on + // to the channel (which contains the updated list of backends) before + // starting to attempt the RPC distribution checks. + // - This is usually a transitory state and will eventually fix itself when + // the new picker is pushed on the channel, and RPCs will start getting + // routed to only backends that we care about. + // + // We work around this situation by using two loops. The inner loop contains + // the meat of the calculations, and includes the logic which factors out + // the randomness in weighted roundrobin. If we ever see an RPCs getting + // routed to a backend that we dont expect it to get routed to, we break + // from the inner loop thereby resetting all state and start afresh. + for { + results := make(map[string]float64) + totalCount := float64(0) + InnerLoop: + for { + if ctx.Err() != nil { + return fmt.Errorf("timeout when waiting for roundrobin distribution of RPCs across addresses: %v", addrs) + } + for i := 0; i < len(addrs); i++ { + var peer peer.Peer + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)); err != nil { + return fmt.Errorf("EmptyCall() = %v, want ", err) + } + if addr := peer.Addr.String(); wantAddrCount[addr] == 0 { + break InnerLoop + } + results[peer.Addr.String()]++ + } + totalCount += float64(len(addrs)) + + gotRatio := make(map[string]float64) + for addr, count := range results { + gotRatio[addr] = count / totalCount + } + if equalApproximate(gotRatio, wantRatio) { + return nil + } + logger.Infof("non-weighted-roundrobin, gotRatio: %v, wantRatio: %v", gotRatio, wantRatio) + } + <-time.After(time.Millisecond) + } +} + +func equalApproximate(got, want map[string]float64) bool { + if len(got) != len(want) { + return false + } + opt := cmp.Comparer(func(x, y 
float64) bool { + delta := math.Abs(x - y) + mean := math.Abs(x+y) / 2.0 + return delta/mean < 0.05 + }) + for addr := range want { + if !cmp.Equal(got[addr], want[addr], opt) { + return false + } + } + return true +} diff --git a/internal/testutils/wrappers.go b/internal/testutils/wrappers.go new file mode 100644 index 000000000000..c9b596d8851c --- /dev/null +++ b/internal/testutils/wrappers.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package testutils + +import ( + "net" + "testing" +) + +// ConnWrapper wraps a net.Conn and pushes on a channel when closed. +type ConnWrapper struct { + net.Conn + CloseCh *Channel +} + +// Close closes the connection and sends a value on the close channel. +func (cw *ConnWrapper) Close() error { + err := cw.Conn.Close() + cw.CloseCh.Replace(nil) + return err +} + +// ListenerWrapper wraps a net.Listener and the returned net.Conn. +// +// It pushes on a channel whenever it accepts a new connection. +type ListenerWrapper struct { + net.Listener + NewConnCh *Channel +} + +// Accept wraps the Listener Accept and sends the accepted connection on a +// channel. 
+func (l *ListenerWrapper) Accept() (net.Conn, error) { + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + closeCh := NewChannel() + conn := &ConnWrapper{Conn: c, CloseCh: closeCh} + l.NewConnCh.Send(conn) + return conn, nil +} + +// NewListenerWrapper returns a ListenerWrapper. +func NewListenerWrapper(t *testing.T, lis net.Listener) *ListenerWrapper { + if lis == nil { + var err error + lis, err = LocalTCPListener() + if err != nil { + t.Fatal(err) + } + } + + return &ListenerWrapper{ + Listener: lis, + NewConnCh: NewChannel(), + } +} diff --git a/xds/internal/testutils/wrr.go b/internal/testutils/wrr.go similarity index 100% rename from xds/internal/testutils/wrr.go rename to internal/testutils/wrr.go diff --git a/internal/testutils/xds/bootstrap/bootstrap.go b/internal/testutils/xds/bootstrap/bootstrap.go new file mode 100644 index 000000000000..786a6a4d7513 --- /dev/null +++ b/internal/testutils/xds/bootstrap/bootstrap.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package bootstrap provides functionality to generate bootstrap configuration. +package bootstrap + +import ( + "encoding/json" + "fmt" + "os" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" +) + +var logger = grpclog.Component("internal/xds") + +// Options wraps the parameters used to generate bootstrap configuration. 
+type Options struct { + // NodeID is the node identifier of the gRPC client/server node in the + // proxyless service mesh. + NodeID string + // ServerURI is the address of the management server. + ServerURI string + // IgnoreResourceDeletion, if true, results in a bootstrap config with the + // `server_features` list containing `ignore_resource_deletion`. This results + // in gRPC ignoring resource deletions from the management server, as per A53. + IgnoreResourceDeletion bool + // ClientDefaultListenerResourceNameTemplate is the default listener + // resource name template to be used on the gRPC client. + ClientDefaultListenerResourceNameTemplate string + // ServerListenerResourceNameTemplate is the listener resource name template + // to be used on the gRPC server. + ServerListenerResourceNameTemplate string + // CertificateProviders is the certificate providers configuration. + CertificateProviders map[string]json.RawMessage + // Authorities is a list of non-default authorities. + // + // In the config, an authority contains {ServerURI, xds-version, creds, + // features, etc}. Note that this field only has ServerURI (it's a + // map[authority-name]ServerURI). The other fields (version, creds, + // features) are assumed to be the same as the default authority (they can + // be added later if needed). + // + // If the env var corresponding to federation (envconfig.XDSFederation) is + // set, an entry with empty string as the key and empty server config as + // value will be added. This will be used by new style resource names with + // an empty authority. + Authorities map[string]string +} + +// CreateFile creates a temporary file with bootstrap contents, based on the +// passed in options, and updates the bootstrap environment variable to point to +// this file. +// +// Returns a cleanup function which will be non-nil if the setup process was +// completed successfully. 
It is the responsibility of the caller to invoke the +// cleanup function at the end of the test. +func CreateFile(opts Options) (func(), error) { + bootstrapContents, err := Contents(opts) + if err != nil { + return nil, err + } + f, err := os.CreateTemp("", "test_xds_bootstrap_*") + if err != nil { + return nil, fmt.Errorf("failed to created bootstrap file: %v", err) + } + + if err := os.WriteFile(f.Name(), bootstrapContents, 0644); err != nil { + return nil, fmt.Errorf("failed to created bootstrap file: %v", err) + } + logger.Infof("Created bootstrap file at %q with contents: %s\n", f.Name(), bootstrapContents) + + origBootstrapFileName := envconfig.XDSBootstrapFileName + envconfig.XDSBootstrapFileName = f.Name() + return func() { + os.Remove(f.Name()) + envconfig.XDSBootstrapFileName = origBootstrapFileName + }, nil +} + +// Contents returns the contents to go into a bootstrap file, environment, or +// configuration passed to xds.NewXDSResolverWithConfigForTesting. +func Contents(opts Options) ([]byte, error) { + cfg := &bootstrapConfig{ + XdsServers: []server{ + { + ServerURI: opts.ServerURI, + ChannelCreds: []creds{{Type: "insecure"}}, + }, + }, + Node: node{ + ID: opts.NodeID, + }, + CertificateProviders: opts.CertificateProviders, + ClientDefaultListenerResourceNameTemplate: opts.ClientDefaultListenerResourceNameTemplate, + ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, + } + cfg.XdsServers[0].ServerFeatures = append(cfg.XdsServers[0].ServerFeatures, "xds_v3") + if opts.IgnoreResourceDeletion { + cfg.XdsServers[0].ServerFeatures = append(cfg.XdsServers[0].ServerFeatures, "ignore_resource_deletion") + } + + auths := make(map[string]authority) + if envconfig.XDSFederation { + // This will end up using the top-level server list for new style + // resources with empty authority. 
+ auths[""] = authority{} + } + for n, auURI := range opts.Authorities { + auths[n] = authority{XdsServers: []server{{ + ServerURI: auURI, + ChannelCreds: []creds{{Type: "insecure"}}, + ServerFeatures: cfg.XdsServers[0].ServerFeatures, + }}} + } + cfg.Authorities = auths + + bootstrapContents, err := json.MarshalIndent(cfg, "", " ") + if err != nil { + return nil, fmt.Errorf("failed to created bootstrap file: %v", err) + } + return bootstrapContents, nil +} + +type bootstrapConfig struct { + XdsServers []server `json:"xds_servers,omitempty"` + Node node `json:"node,omitempty"` + CertificateProviders map[string]json.RawMessage `json:"certificate_providers,omitempty"` + ClientDefaultListenerResourceNameTemplate string `json:"client_default_listener_resource_name_template,omitempty"` + ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` + Authorities map[string]authority `json:"authorities,omitempty"` +} + +type authority struct { + XdsServers []server `json:"xds_servers,omitempty"` +} + +type server struct { + ServerURI string `json:"server_uri,omitempty"` + ChannelCreds []creds `json:"channel_creds,omitempty"` + ServerFeatures []string `json:"server_features,omitempty"` +} + +type creds struct { + Type string `json:"type,omitempty"` + Config interface{} `json:"config,omitempty"` +} + +type node struct { + ID string `json:"id,omitempty"` +} diff --git a/xds/internal/testutils/e2e/bootstrap.go b/internal/testutils/xds/e2e/bootstrap.go similarity index 86% rename from xds/internal/testutils/e2e/bootstrap.go rename to internal/testutils/xds/e2e/bootstrap.go index 72a1a9900cfe..99702032f817 100644 --- a/xds/internal/testutils/e2e/bootstrap.go +++ b/internal/testutils/xds/e2e/bootstrap.go @@ -26,8 +26,8 @@ import ( // DefaultFileWatcherConfig is a helper function to create a default certificate // provider plugin configuration. 
The test is expected to have setup the files // appropriately before this configuration is used to instantiate providers. -func DefaultFileWatcherConfig(certPath, keyPath, caPath string) map[string]json.RawMessage { - cfg := fmt.Sprintf(`{ +func DefaultFileWatcherConfig(certPath, keyPath, caPath string) json.RawMessage { + return json.RawMessage(fmt.Sprintf(`{ "plugin_name": "file_watcher", "config": { "certificate_file": %q, @@ -35,8 +35,5 @@ func DefaultFileWatcherConfig(certPath, keyPath, caPath string) map[string]json. "ca_certificate_file": %q, "refresh_interval": "600s" } - }`, certPath, keyPath, caPath) - return map[string]json.RawMessage{ - "google_cloud_private_spiffe": json.RawMessage(cfg), - } + }`, certPath, keyPath, caPath)) } diff --git a/internal/testutils/xds/e2e/clientresources.go b/internal/testutils/xds/e2e/clientresources.go new file mode 100644 index 000000000000..9d46483e3d23 --- /dev/null +++ b/internal/testutils/xds/e2e/clientresources.go @@ -0,0 +1,637 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package e2e + +import ( + "fmt" + "net" + "strconv" + + "github.com/envoyproxy/go-control-plane/pkg/wellknown" + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/protobuf/types/known/anypb" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" +) + +const ( + // ServerListenerResourceNameTemplate is the Listener resource name template + // used on the server side. + ServerListenerResourceNameTemplate = "grpc/server?xds.resource.listening_address=%s" + // ClientSideCertProviderInstance is the certificate provider instance name + // used in the Cluster resource on the client side. + ClientSideCertProviderInstance = "client-side-certificate-provider-instance" + // ServerSideCertProviderInstance is the certificate provider instance name + // used in the Listener resource on the server side. + ServerSideCertProviderInstance = "server-side-certificate-provider-instance" +) + +// SecurityLevel allows the test to control the security level to be used in the +// resource returned by this package. 
+type SecurityLevel int + +const ( + // SecurityLevelNone is used when no security configuration is required. + SecurityLevelNone SecurityLevel = iota + // SecurityLevelTLS is used when security configuration corresponding to TLS + // is required. Only the server presents an identity certificate in this + // configuration. + SecurityLevelTLS + // SecurityLevelMTLS is used when security configuration corresponding to + // mTLS is required. Both client and server present identity certificates in + // this configuration. + SecurityLevelMTLS +) + +// ResourceParams wraps the arguments to be passed to DefaultClientResources. +type ResourceParams struct { + // DialTarget is the client's dial target. This is used as the name of the + // Listener resource. + DialTarget string + // NodeID is the id of the xdsClient to which this update is to be pushed. + NodeID string + // Host is the host of the default Endpoint resource. + Host string + // Port is the port of the default Endpoint resource. + Port uint32 + // SecLevel controls the security configuration in the Cluster resource. + SecLevel SecurityLevel +} + +// DefaultClientResources returns a set of resources (LDS, RDS, CDS, EDS) for a +// client to generically connect to one server. 
+func DefaultClientResources(params ResourceParams) UpdateOptions { + routeConfigName := "route-" + params.DialTarget + clusterName := "cluster-" + params.DialTarget + endpointsName := "endpoints-" + params.DialTarget + return UpdateOptions{ + NodeID: params.NodeID, + Listeners: []*v3listenerpb.Listener{DefaultClientListener(params.DialTarget, routeConfigName)}, + Routes: []*v3routepb.RouteConfiguration{DefaultRouteConfig(routeConfigName, params.DialTarget, clusterName)}, + Clusters: []*v3clusterpb.Cluster{DefaultCluster(clusterName, endpointsName, params.SecLevel)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{DefaultEndpoint(endpointsName, params.Host, []uint32{params.Port})}, + } +} + +// RouterHTTPFilter is the HTTP Filter configuration for the Router filter. +var RouterHTTPFilter = HTTPFilter("router", &v3routerpb.Router{}) + +// DefaultClientListener returns a basic xds Listener resource to be used on +// the client side. +func DefaultClientListener(target, routeName string) *v3listenerpb.Listener { + hcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: routeName, + }}, + HttpFilters: []*v3httppb.HttpFilter{HTTPFilter("router", &v3routerpb.Router{})}, // router fields are unused by grpc + }) + return &v3listenerpb.Listener{ + Name: target, + ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, + FilterChains: []*v3listenerpb.FilterChain{{ + Name: "filter-chain-name", + Filters: []*v3listenerpb.Filter{{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, + }}, + }}, + } +} + +// DefaultServerListener returns a basic xds Listener resource to be used on +// the server side. 
+func DefaultServerListener(host string, port uint32, secLevel SecurityLevel) *v3listenerpb.Listener { + var tlsContext *v3tlspb.DownstreamTlsContext + switch secLevel { + case SecurityLevelNone: + case SecurityLevelTLS: + tlsContext = &v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ServerSideCertProviderInstance, + }, + }, + } + case SecurityLevelMTLS: + tlsContext = &v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ServerSideCertProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ServerSideCertProviderInstance, + }, + }, + }, + } + } + + var ts *v3corepb.TransportSocket + if tlsContext != nil { + ts = &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(tlsContext), + }, + } + } + return &v3listenerpb.Listener{ + Name: fmt.Sprintf(ServerListenerResourceNameTemplate, net.JoinHostPort(host, strconv.Itoa(int(port)))), + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: port, + }, + }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "v4-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: 
v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + // This "*" string matches on any incoming authority. This is to ensure any + // incoming RPC matches to Route_NonForwardingAction and will proceed as + // normal. + Domains: []string{"*"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{RouterHTTPFilter}, + }), + }, + }, + }, + TransportSocket: ts, + }, + { + Name: "v6-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + // This "*" string matches on any incoming authority. This is to ensure any + // incoming RPC matches to Route_NonForwardingAction and will proceed as + // normal. 
+ Domains: []string{"*"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{RouterHTTPFilter}, + }), + }, + }, + }, + TransportSocket: ts, + }, + }, + } +} + +// HTTPFilter constructs an xds HttpFilter with the provided name and config. +func HTTPFilter(name string, config proto.Message) *v3httppb.HttpFilter { + return &v3httppb.HttpFilter{ + Name: name, + ConfigType: &v3httppb.HttpFilter_TypedConfig{ + TypedConfig: testutils.MarshalAny(config), + }, + } +} + +// DefaultRouteConfig returns a basic xds RouteConfig resource. +func DefaultRouteConfig(routeName, ldsTarget, clusterName string) *v3routepb.RouteConfiguration { + return &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: clusterName, + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }, + }}, + }}, + }}, + }}, + } +} + +// RouteConfigClusterSpecifierType determines the cluster specifier type for the +// route actions configured in the returned RouteConfiguration resource. +type RouteConfigClusterSpecifierType int + +const ( + // RouteConfigClusterSpecifierTypeCluster results in the cluster specifier + // being set to a RouteAction_Cluster. + RouteConfigClusterSpecifierTypeCluster RouteConfigClusterSpecifierType = iota + // RouteConfigClusterSpecifierTypeWeightedCluster results in the cluster + // specifier being set to RouteAction_WeightedClusters. 
+ RouteConfigClusterSpecifierTypeWeightedCluster + // RouteConfigClusterSpecifierTypeClusterSpecifierPlugin results in the + // cluster specifier being set to a RouteAction_ClusterSpecifierPlugin. + RouteConfigClusterSpecifierTypeClusterSpecifierPlugin +) + +// RouteConfigOptions contains options to configure a RouteConfiguration +// resource. +type RouteConfigOptions struct { + // RouteConfigName is the name of the RouteConfiguration resource. + RouteConfigName string + // ListenerName is the name of the Listener resource which uses this + // RouteConfiguration. + ListenerName string + // ClusterSpecifierType determines the cluster specifier type. + ClusterSpecifierType RouteConfigClusterSpecifierType + + // ClusterName is name of the cluster resource used when the cluster + // specifier type is set to RouteConfigClusterSpecifierTypeCluster. + // + // Default value of "A" is used if left unspecified. + ClusterName string + // WeightedClusters is a map from cluster name to weights, and is used when + // the cluster specifier type is set to + // RouteConfigClusterSpecifierTypeWeightedCluster. + // + // Default value of {"A": 75, "B": 25} is used if left unspecified. + WeightedClusters map[string]int + // The below two fields specify the name of the cluster specifier plugin and + // its configuration, and are used when the cluster specifier type is set to + // RouteConfigClusterSpecifierTypeClusterSpecifierPlugin. Tests are expected + // to provide valid values for these fields when appropriate. + ClusterSpecifierPluginName string + ClusterSpecifierPluginConfig *anypb.Any +} + +// RouteConfigResourceWithOptions returns a RouteConfiguration resource +// configured with the provided options. 
+func RouteConfigResourceWithOptions(opts RouteConfigOptions) *v3routepb.RouteConfiguration { + switch opts.ClusterSpecifierType { + case RouteConfigClusterSpecifierTypeCluster: + clusterName := opts.ClusterName + if clusterName == "" { + clusterName = "A" + } + return &v3routepb.RouteConfiguration{ + Name: opts.RouteConfigName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{opts.ListenerName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }}, + }}, + }}, + } + case RouteConfigClusterSpecifierTypeWeightedCluster: + weightedClusters := opts.WeightedClusters + if weightedClusters == nil { + weightedClusters = map[string]int{"A": 75, "B": 25} + } + clusters := []*v3routepb.WeightedCluster_ClusterWeight{} + for name, weight := range weightedClusters { + clusters = append(clusters, &v3routepb.WeightedCluster_ClusterWeight{ + Name: name, + Weight: &wrapperspb.UInt32Value{Value: uint32(weight)}, + }) + } + return &v3routepb.RouteConfiguration{ + Name: opts.RouteConfigName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{opts.ListenerName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{Clusters: clusters}}, + }}, + }}, + }}, + } + case RouteConfigClusterSpecifierTypeClusterSpecifierPlugin: + return &v3routepb.RouteConfiguration{ + Name: opts.RouteConfigName, + ClusterSpecifierPlugins: []*v3routepb.ClusterSpecifierPlugin{{ + Extension: &v3corepb.TypedExtensionConfig{ + Name: opts.ClusterSpecifierPluginName, + TypedConfig: opts.ClusterSpecifierPluginConfig, + }}, + }, + VirtualHosts: 
[]*v3routepb.VirtualHost{{ + Domains: []string{opts.ListenerName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{ClusterSpecifierPlugin: opts.ClusterSpecifierPluginName}, + }}, + }}, + }}, + } + default: + panic(fmt.Sprintf("unsupported cluster specifier plugin type: %v", opts.ClusterSpecifierType)) + } +} + +// DefaultCluster returns a basic xds Cluster resource. +func DefaultCluster(clusterName, edsServiceName string, secLevel SecurityLevel) *v3clusterpb.Cluster { + return ClusterResourceWithOptions(ClusterOptions{ + ClusterName: clusterName, + ServiceName: edsServiceName, + Policy: LoadBalancingPolicyRoundRobin, + SecurityLevel: secLevel, + }) +} + +// LoadBalancingPolicy determines the policy used for balancing load across +// endpoints in the Cluster. +type LoadBalancingPolicy int + +const ( + // LoadBalancingPolicyRoundRobin results in the use of the weighted_target + // LB policy to balance load across localities and endpoints in the cluster. + LoadBalancingPolicyRoundRobin LoadBalancingPolicy = iota + // LoadBalancingPolicyRingHash results in the use of the ring_hash LB policy + // as the leaf policy. + LoadBalancingPolicyRingHash +) + +// ClusterOptions contains options to configure a Cluster resource. +type ClusterOptions struct { + // ClusterName is the name of the Cluster resource. + ClusterName string + // ServiceName is the EDS service name of the Cluster. + ServiceName string + // Policy is the LB policy to be used. + Policy LoadBalancingPolicy + // SecurityLevel determines the security configuration for the Cluster. + SecurityLevel SecurityLevel +} + +// ClusterResourceWithOptions returns an xDS Cluster resource configured with +// the provided options. 
+func ClusterResourceWithOptions(opts ClusterOptions) *v3clusterpb.Cluster { + var tlsContext *v3tlspb.UpstreamTlsContext + switch opts.SecurityLevel { + case SecurityLevelNone: + case SecurityLevelTLS: + tlsContext = &v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ClientSideCertProviderInstance, + }, + }, + }, + } + case SecurityLevelMTLS: + tlsContext = &v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ClientSideCertProviderInstance, + }, + }, + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: ClientSideCertProviderInstance, + }, + }, + } + } + + var lbPolicy v3clusterpb.Cluster_LbPolicy + switch opts.Policy { + case LoadBalancingPolicyRoundRobin: + lbPolicy = v3clusterpb.Cluster_ROUND_ROBIN + case LoadBalancingPolicyRingHash: + lbPolicy = v3clusterpb.Cluster_RING_HASH + } + cluster := &v3clusterpb.Cluster{ + Name: opts.ClusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: opts.ServiceName, + }, + LbPolicy: lbPolicy, + } + if tlsContext != nil { + cluster.TransportSocket = &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(tlsContext), + }, + } + } + return cluster +} 
+ +// LocalityOptions contains options to configure a Locality. +type LocalityOptions struct { + // Name is the unique locality name. + Name string + // Weight is the weight of the locality, used for load balancing. + Weight uint32 + // Backends is a set of backends belonging to this locality. + Backends []BackendOptions +} + +// BackendOptions contains options to configure individual backends in a +// locality. +type BackendOptions struct { + // Port number on which the backend is accepting connections. All backends + // are expected to run on localhost, hence host name is not stored here. + Port uint32 + // Health status of the backend. Default is UNKNOWN which is treated the + // same as HEALTHY. + HealthStatus v3corepb.HealthStatus +} + +// EndpointOptions contains options to configure an Endpoint (or +// ClusterLoadAssignment) resource. +type EndpointOptions struct { + // ClusterName is the name of the Cluster resource (or EDS service name) + // containing the endpoints specified below. + ClusterName string + // Host is the hostname of the endpoints. In our e2e tests, hostname must + // always be "localhost". + Host string + // Localities is a set of localities belonging to this resource. + Localities []LocalityOptions + // DropPercents is a map from drop category to a drop percentage. If unset, + // no drops are configured. + DropPercents map[string]int +} + +// DefaultEndpoint returns a basic xds Endpoint resource. +func DefaultEndpoint(clusterName string, host string, ports []uint32) *v3endpointpb.ClusterLoadAssignment { + var bOpts []BackendOptions + for _, p := range ports { + bOpts = append(bOpts, BackendOptions{Port: p}) + } + return EndpointResourceWithOptions(EndpointOptions{ + ClusterName: clusterName, + Host: host, + Localities: []LocalityOptions{ + { + Backends: bOpts, + Weight: 1, + }, + }, + }) +} + +// EndpointResourceWithOptions returns an xds Endpoint resource configured with +// the provided options. 
+func EndpointResourceWithOptions(opts EndpointOptions) *v3endpointpb.ClusterLoadAssignment { + var endpoints []*v3endpointpb.LocalityLbEndpoints + for i, locality := range opts.Localities { + var lbEndpoints []*v3endpointpb.LbEndpoint + for _, b := range locality.Backends { + lbEndpoints = append(lbEndpoints, &v3endpointpb.LbEndpoint{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: opts.Host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: b.Port}, + }, + }}, + }}, + HealthStatus: b.HealthStatus, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, + }) + } + + endpoints = append(endpoints, &v3endpointpb.LocalityLbEndpoints{ + Locality: &v3corepb.Locality{ + Region: fmt.Sprintf("region-%d", i+1), + Zone: fmt.Sprintf("zone-%d", i+1), + SubZone: fmt.Sprintf("subzone-%d", i+1), + }, + LbEndpoints: lbEndpoints, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: locality.Weight}, + Priority: 0, + }) + } + + cla := &v3endpointpb.ClusterLoadAssignment{ + ClusterName: opts.ClusterName, + Endpoints: endpoints, + } + + var drops []*v3endpointpb.ClusterLoadAssignment_Policy_DropOverload + for category, val := range opts.DropPercents { + drops = append(drops, &v3endpointpb.ClusterLoadAssignment_Policy_DropOverload{ + Category: category, + DropPercentage: &v3typepb.FractionalPercent{ + Numerator: uint32(val), + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }) + } + if len(drops) != 0 { + cla.Policy = &v3endpointpb.ClusterLoadAssignment_Policy{ + DropOverloads: drops, + } + } + return cla +} diff --git a/internal/testutils/xds/e2e/logging.go b/internal/testutils/xds/e2e/logging.go new file mode 100644 index 000000000000..f524c451b002 --- /dev/null +++ b/internal/testutils/xds/e2e/logging.go @@ -0,0 +1,48 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("xds-e2e") + +// serverLogger implements the Logger interface defined at +// envoyproxy/go-control-plane/pkg/log. This is passed to the Snapshot cache. +type serverLogger struct{} + +func (l serverLogger) Debugf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logger.InfoDepth(1, msg) +} +func (l serverLogger) Infof(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logger.InfoDepth(1, msg) +} +func (l serverLogger) Warnf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logger.WarningDepth(1, msg) +} +func (l serverLogger) Errorf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + logger.ErrorDepth(1, msg) +} diff --git a/internal/testutils/xds/e2e/server.go b/internal/testutils/xds/e2e/server.go new file mode 100644 index 000000000000..d81c3405440b --- /dev/null +++ b/internal/testutils/xds/e2e/server.go @@ -0,0 +1,247 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package e2e provides utilities for end2end testing of xDS functionality. +package e2e + +import ( + "context" + "fmt" + "net" + "reflect" + "strconv" + + "github.com/envoyproxy/go-control-plane/pkg/cache/types" + "google.golang.org/grpc" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + v3cache "github.com/envoyproxy/go-control-plane/pkg/cache/v3" + v3resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + v3server "github.com/envoyproxy/go-control-plane/pkg/server/v3" +) + +// ManagementServer is a thin wrapper around the xDS control plane +// implementation provided by envoyproxy/go-control-plane. 
+type ManagementServer struct { + // Address is the host:port on which the management server is listening for + // new connections. + Address string + + // LRSServer points to the fake LRS server implementation. Set only if the + // SupportLoadReportingService option was set to true when creating this + // management server. + LRSServer *fakeserver.Server + + cancel context.CancelFunc // To stop the v3 ADS service. + xs v3server.Server // v3 implementation of ADS. + gs *grpc.Server // gRPC server which exports the ADS service. + cache v3cache.SnapshotCache // Resource snapshot. + version int // Version of resource snapshot. +} + +// ManagementServerOptions contains options to be passed to the management +// server during creation. +type ManagementServerOptions struct { + // Listener to accept connections on. If nil, a TCP listener on a local port + // will be created and used. + Listener net.Listener + + // SupportLoadReportingService, if set, results in the load reporting + // service being registered on the same port as that of ADS. + SupportLoadReportingService bool + + // AllowResourceSubset allows the management server to respond to requests + // before all configured resources are explicitly named in the request. The + // default behavior that we want is for the management server to wait for + // all configured resources to be requested before responding to any of + // them, since this is how we have run our tests historically, and should be + // set to true only for tests which explicitly require the other behavior. + AllowResourceSubset bool + + // ServerFeaturesIgnoreResourceDeletion, if set, results in a bootstrap config + // where the server features list contains `ignore_resource_deletion`. This + // results in gRPC ignoring resource deletions from the management server, as + // per A53. 
+ ServerFeaturesIgnoreResourceDeletion bool + + // The callbacks defined below correspond to the state of the world (sotw) + // version of the xDS API on the management server. + + // OnStreamOpen is called when an xDS stream is opened. The callback is + // invoked with the assigned stream ID and the type URL from the incoming + // request (or "" for ADS). + // + // Returning an error from this callback will end processing and close the + // stream. OnStreamClosed will still be called. + OnStreamOpen func(context.Context, int64, string) error + + // OnStreamClosed is called immediately prior to closing an xDS stream. The + // callback is invoked with the stream ID of the stream being closed. + OnStreamClosed func(int64, *v3corepb.Node) + + // OnStreamRequest is called when a request is received on the stream. The + // callback is invoked with the stream ID of the stream on which the request + // was received and the received request. + // + // Returning an error from this callback will end processing and close the + // stream. OnStreamClosed will still be called. + OnStreamRequest func(int64, *v3discoverypb.DiscoveryRequest) error + + // OnStreamResponse is called immediately prior to sending a response on the + // stream. The callback is invoked with the stream ID of the stream on which + // the response is being sent along with the incoming request and the outgoing + // response. + OnStreamResponse func(context.Context, int64, *v3discoverypb.DiscoveryRequest, *v3discoverypb.DiscoveryResponse) +} + +// StartManagementServer initializes a management server which implements the +// AggregatedDiscoveryService endpoint. The management server is initialized +// with no resources. Tests should call the Update() method to change the +// resource snapshot held by the management server, as required by the test +// logic. When the test is done, it should call the Stop() method to cleanup +// resources allocated by the management server. 
+func StartManagementServer(opts ManagementServerOptions) (*ManagementServer, error) { + // Create a snapshot cache. The first parameter to NewSnapshotCache() + // controls whether the server should wait for all resources to be + // explicitly named in the request before responding to any of them. + wait := !opts.AllowResourceSubset + cache := v3cache.NewSnapshotCache(wait, v3cache.IDHash{}, serverLogger{}) + logger.Infof("Created new snapshot cache...") + + lis := opts.Listener + if lis == nil { + var err error + lis, err = net.Listen("tcp", "localhost:0") + if err != nil { + return nil, fmt.Errorf("listening on local host and port: %v", err) + } + } + + // Cancelling the context passed to the server is the only way of stopping it + // at the end of the test. + ctx, cancel := context.WithCancel(context.Background()) + callbacks := v3server.CallbackFuncs{ + StreamOpenFunc: opts.OnStreamOpen, + StreamClosedFunc: opts.OnStreamClosed, + StreamRequestFunc: opts.OnStreamRequest, + StreamResponseFunc: opts.OnStreamResponse, + } + + // Create an xDS management server and register the ADS implementation + // provided by it on a gRPC server. + xs := v3server.NewServer(ctx, cache, callbacks) + gs := grpc.NewServer() + v3discoverygrpc.RegisterAggregatedDiscoveryServiceServer(gs, xs) + logger.Infof("Registered Aggregated Discovery Service (ADS)...") + + mgmtServer := &ManagementServer{ + Address: lis.Addr().String(), + cancel: cancel, + version: 0, + gs: gs, + xs: xs, + cache: cache, + } + if opts.SupportLoadReportingService { + lrs := fakeserver.NewServer(lis.Addr().String()) + v3lrsgrpc.RegisterLoadReportingServiceServer(gs, lrs) + mgmtServer.LRSServer = lrs + logger.Infof("Registered Load Reporting Service (LRS)...") + } + + // Start serving. + go gs.Serve(lis) + logger.Infof("xDS management server serving at: %v...", lis.Addr().String()) + + return mgmtServer, nil +} + +// UpdateOptions wraps parameters to be passed to the Update() method. 
+type UpdateOptions struct { + // NodeID is the id of the client to which this update is to be pushed. + NodeID string + // Endpoints, Clusters, Routes, and Listeners are the updated list of xds + // resources for the server. All must be provided with each Update. + Endpoints []*v3endpointpb.ClusterLoadAssignment + Clusters []*v3clusterpb.Cluster + Routes []*v3routepb.RouteConfiguration + Listeners []*v3listenerpb.Listener + // SkipValidation indicates whether we want to skip validation (by not + // calling snapshot.Consistent()). It can be useful for negative tests, + // where we send updates that the client will NACK. + SkipValidation bool +} + +// Update changes the resource snapshot held by the management server, which +// updates connected clients as required. +func (s *ManagementServer) Update(ctx context.Context, opts UpdateOptions) error { + s.version++ + + // Create a snapshot with the passed in resources. + resources := map[v3resource.Type][]types.Resource{ + v3resource.ListenerType: resourceSlice(opts.Listeners), + v3resource.RouteType: resourceSlice(opts.Routes), + v3resource.ClusterType: resourceSlice(opts.Clusters), + v3resource.EndpointType: resourceSlice(opts.Endpoints), + } + snapshot, err := v3cache.NewSnapshot(strconv.Itoa(s.version), resources) + if err != nil { + return fmt.Errorf("failed to create new snapshot cache: %v", err) + + } + if !opts.SkipValidation { + if err := snapshot.Consistent(); err != nil { + return fmt.Errorf("failed to create new resource snapshot: %v", err) + } + } + logger.Infof("Created new resource snapshot...") + + // Update the cache with the new resource snapshot. + if err := s.cache.SetSnapshot(ctx, opts.NodeID, snapshot); err != nil { + return fmt.Errorf("failed to update resource snapshot in management server: %v", err) + } + logger.Infof("Updated snapshot cache with resource snapshot...") + return nil +} + +// Stop stops the management server. 
+func (s *ManagementServer) Stop() { + if s.cancel != nil { + s.cancel() + } + s.gs.Stop() +} + +// resourceSlice accepts a slice of any type of proto messages and returns a +// slice of types.Resource. Will panic if there is an input type mismatch. +func resourceSlice(i interface{}) []types.Resource { + v := reflect.ValueOf(i) + rs := make([]types.Resource, v.Len()) + for i := 0; i < v.Len(); i++ { + rs[i] = v.Index(i).Interface().(types.Resource) + } + return rs +} diff --git a/internal/testutils/xds/e2e/setup_certs.go b/internal/testutils/xds/e2e/setup_certs.go new file mode 100644 index 000000000000..799e18564879 --- /dev/null +++ b/internal/testutils/xds/e2e/setup_certs.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package e2e + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "os" + "path" + "testing" + + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/testdata" +) + +const ( + // Names of files inside tempdir, for certprovider plugin to watch. 
+ certFile = "cert.pem" + keyFile = "key.pem" + rootFile = "ca.pem" +) + +func createTmpFile(src, dst string) error { + data, err := os.ReadFile(src) + if err != nil { + return fmt.Errorf("os.ReadFile(%q) failed: %v", src, err) + } + if err := os.WriteFile(dst, data, os.ModePerm); err != nil { + return fmt.Errorf("os.WriteFile(%q) failed: %v", dst, err) + } + return nil +} + +// createTmpDirWithFiles creates a temporary directory under the system default +// tempDir with the given dirSuffix. It also reads from certSrc, keySrc and +// rootSrc files and creates appropriate files under the newly created tempDir. +// Returns the name of the created tempDir. +func createTmpDirWithFiles(dirSuffix, certSrc, keySrc, rootSrc string) (string, error) { + // Create a temp directory. Passing an empty string for the first argument + // uses the system temp directory. + dir, err := os.MkdirTemp("", dirSuffix) + if err != nil { + return "", fmt.Errorf("os.MkdirTemp() failed: %v", err) + } + + if err := createTmpFile(testdata.Path(certSrc), path.Join(dir, certFile)); err != nil { + return "", err + } + if err := createTmpFile(testdata.Path(keySrc), path.Join(dir, keyFile)); err != nil { + return "", err + } + if err := createTmpFile(testdata.Path(rootSrc), path.Join(dir, rootFile)); err != nil { + return "", err + } + return dir, nil +} + +// CreateClientTLSCredentials creates client-side TLS transport credentials +// using certificate and key files from testdata/x509 directory. 
+func CreateClientTLSCredentials(t *testing.T) credentials.TransportCredentials { + t.Helper() + + cert, err := tls.LoadX509KeyPair(testdata.Path("x509/client1_cert.pem"), testdata.Path("x509/client1_key.pem")) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(x509/client1_cert.pem, x509/client1_key.pem) failed: %v", err) + } + b, err := os.ReadFile(testdata.Path("x509/server_ca_cert.pem")) + if err != nil { + t.Fatalf("os.ReadFile(x509/server_ca_cert.pem) failed: %v", err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(b) { + t.Fatal("failed to append certificates") + } + return credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + ServerName: "x.test.example.com", + }) +} diff --git a/internal/testutils/xds/e2e/setup_management_server.go b/internal/testutils/xds/e2e/setup_management_server.go new file mode 100644 index 000000000000..877cdea2c58d --- /dev/null +++ b/internal/testutils/xds/e2e/setup_management_server.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package e2e + +import ( + "encoding/json" + "path" + "testing" + + "github.com/google/uuid" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/resolver" +) + +// SetupManagementServer performs the following: +// - spin up an xDS management server on a local port +// - set up certificates for consumption by the file_watcher plugin +// - creates a bootstrap file in a temporary location +// - creates an xDS resolver using the above bootstrap contents +// +// Returns the following: +// - management server +// - nodeID to be used by the client when connecting to the management server +// - bootstrap contents to be used by the client +// - xDS resolver builder to be used by the client +// - a cleanup function to be invoked at the end of the test +func SetupManagementServer(t *testing.T, opts ManagementServerOptions) (*ManagementServer, string, []byte, resolver.Builder, func()) { + t.Helper() + + // Spin up an xDS management server on a local port. + server, err := StartManagementServer(opts) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer func() { + if err != nil { + server.Stop() + } + }() + + // Create a directory to hold certs and key files used on the server side. + serverDir, err := createTmpDirWithFiles("testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + if err != nil { + server.Stop() + t.Fatal(err) + } + + // Create a directory to hold certs and key files used on the client side. + clientDir, err := createTmpDirWithFiles("testClientSideXDS*", "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") + if err != nil { + server.Stop() + t.Fatal(err) + } + + // Create certificate providers section of the bootstrap config with entries + // for both the client and server sides. 
+ cpc := map[string]json.RawMessage{ + ServerSideCertProviderInstance: DefaultFileWatcherConfig(path.Join(serverDir, certFile), path.Join(serverDir, keyFile), path.Join(serverDir, rootFile)), + ClientSideCertProviderInstance: DefaultFileWatcherConfig(path.Join(clientDir, certFile), path.Join(clientDir, keyFile), path.Join(clientDir, rootFile)), + } + + // Create a bootstrap file in a temporary directory. + nodeID := uuid.New().String() + bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ + NodeID: nodeID, + ServerURI: server.Address, + CertificateProviders: cpc, + ServerListenerResourceNameTemplate: ServerListenerResourceNameTemplate, + }) + if err != nil { + server.Stop() + t.Fatalf("Failed to create bootstrap file: %v", err) + } + + var rb resolver.Builder + if newResolver := internal.NewXDSResolverWithConfigForTesting; newResolver != nil { + rb, err = newResolver.(func([]byte) (resolver.Builder, error))(bootstrapContents) + if err != nil { + server.Stop() + t.Fatalf("Failed to create xDS resolver for testing: %v", err) + } + } + + return server, nodeID, bootstrapContents, rb, func() { server.Stop() } +} diff --git a/xds/internal/testutils/fakeserver/server.go b/internal/testutils/xds/fakeserver/server.go similarity index 53% rename from xds/internal/testutils/fakeserver/server.go rename to internal/testutils/xds/fakeserver/server.go index d37c1c3ef0e2..e2f2fb39e0dd 100644 --- a/xds/internal/testutils/fakeserver/server.go +++ b/internal/testutils/xds/fakeserver/server.go @@ -17,10 +17,13 @@ */ // Package fakeserver provides a fake implementation of the management server. +// +// This package is recommended only for scenarios which cannot be tested using +// the xDS management server (which uses envoy-go-control-plane) provided by the +// `internal/testutils/xds/e2e` package. 
package fakeserver import ( - "context" "fmt" "io" "net" @@ -29,14 +32,13 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/status" - discoverypb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" - lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" - lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" + v3discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" ) const ( @@ -63,6 +65,10 @@ type Response struct { // Server is a fake implementation of xDS and LRS protocols. It listens on the // same port for both services and exposes a bunch of channels to send/receive // messages. +// +// This server is recommended only for scenarios which cannot be tested using +// the xDS management server (which uses envoy-go-control-plane) provided by the +// `internal/testutils/xds/e2e` package. type Server struct { // XDSRequestChan is a channel on which received xDS requests are made // available to the users of this Server. @@ -76,6 +82,12 @@ type Server struct { // LRSResponseChan is a channel on which the Server accepts the LRS // response to be sent to the client. LRSResponseChan chan *Response + // LRSStreamOpenChan is a channel on which the Server sends notifications + // when a new LRS stream is created. 
+ LRSStreamOpenChan *testutils.Channel + // LRSStreamCloseChan is a channel on which the Server sends notifications + // when an existing LRS stream is closed. + LRSStreamCloseChan *testutils.Channel // NewConnChan is a channel on which the fake server notifies receipt of new // connection attempts. Tests can gate on this event before proceeding to // other actions which depend on a connection to the fake server being up. @@ -84,8 +96,8 @@ type Server struct { Address string // The underlying fake implementation of xDS and LRS. - xdsS *xdsServer - lrsS *lrsServer + *xdsServerV3 + *lrsServerV3 } type wrappedListener struct { @@ -102,56 +114,59 @@ func (wl *wrappedListener) Accept() (net.Conn, error) { return c, err } -// StartServer makes a new Server and gets it to start listening on a local -// port for gRPC requests. The returned cancel function should be invoked by -// the caller upon completion of the test. -func StartServer() (*Server, func(), error) { - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, func() {}, fmt.Errorf("net.Listen() failed: %v", err) +// StartServer makes a new Server and gets it to start listening on the given +// net.Listener. If the given net.Listener is nil, a new one is created on a +// local port for gRPC requests. The returned cancel function should be invoked +// by the caller upon completion of the test. 
+func StartServer(lis net.Listener) (*Server, func(), error) { + if lis == nil { + var err error + lis, err = net.Listen("tcp", "localhost:0") + if err != nil { + return nil, func() {}, fmt.Errorf("net.Listen() failed: %v", err) + } } - s := &Server{ - XDSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - LRSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - NewConnChan: testutils.NewChannelWithSize(defaultChannelBufferSize), - XDSResponseChan: make(chan *Response, defaultChannelBufferSize), - LRSResponseChan: make(chan *Response, 1), // The server only ever sends one response. - Address: lis.Addr().String(), - } - s.xdsS = &xdsServer{reqChan: s.XDSRequestChan, respChan: s.XDSResponseChan} - s.lrsS = &lrsServer{reqChan: s.LRSRequestChan, respChan: s.LRSResponseChan} + s := NewServer(lis.Addr().String()) wp := &wrappedListener{ Listener: lis, server: s, } server := grpc.NewServer() - lrsgrpc.RegisterLoadReportingServiceServer(server, s.lrsS) - adsgrpc.RegisterAggregatedDiscoveryServiceServer(server, s.xdsS) + v3lrsgrpc.RegisterLoadReportingServiceServer(server, s) + v3discoverygrpc.RegisterAggregatedDiscoveryServiceServer(server, s) go server.Serve(wp) return s, func() { server.Stop() }, nil } -// XDSClientConn returns a grpc.ClientConn connected to the fakeServer. -func (xdsS *Server) XDSClientConn() (*grpc.ClientConn, func(), error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultDialTimeout) - defer cancel() - - cc, err := grpc.DialContext(ctx, xdsS.Address, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock()) - if err != nil { - return nil, nil, fmt.Errorf("grpc.DialContext(%s) failed: %v", xdsS.Address, err) +// NewServer returns a new instance of Server, set to accept requests on addr. +// It is the responsibility of the caller to register the exported ADS and LRS +// services on an appropriate gRPC server. Most usages should prefer +// StartServer() instead of this. 
+func NewServer(addr string) *Server { + s := &Server{ + XDSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + LRSRequestChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + NewConnChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + XDSResponseChan: make(chan *Response, defaultChannelBufferSize), + LRSResponseChan: make(chan *Response, 1), // The server only ever sends one response. + LRSStreamOpenChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + LRSStreamCloseChan: testutils.NewChannelWithSize(defaultChannelBufferSize), + Address: addr, } - return cc, func() { cc.Close() }, nil + s.xdsServerV3 = &xdsServerV3{reqChan: s.XDSRequestChan, respChan: s.XDSResponseChan} + s.lrsServerV3 = &lrsServerV3{reqChan: s.LRSRequestChan, respChan: s.LRSResponseChan, streamOpenChan: s.LRSStreamOpenChan, streamCloseChan: s.LRSStreamCloseChan} + return s } -type xdsServer struct { +type xdsServerV3 struct { reqChan *testutils.Channel respChan chan *Response } -func (xdsS *xdsServer) StreamAggregatedResources(s adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { +func (xdsS *xdsServerV3) StreamAggregatedResources(s v3discoverygrpc.AggregatedDiscoveryService_StreamAggregatedResourcesServer) error { errCh := make(chan error, 2) go func() { for { @@ -176,7 +191,7 @@ func (xdsS *xdsServer) StreamAggregatedResources(s adsgrpc.AggregatedDiscoverySe retErr = r.Err return } - if err := s.Send(r.Resp.(*discoverypb.DiscoveryResponse)); err != nil { + if err := s.Send(r.Resp.(*v3discoverypb.DiscoveryResponse)); err != nil { retErr = err return } @@ -193,16 +208,21 @@ func (xdsS *xdsServer) StreamAggregatedResources(s adsgrpc.AggregatedDiscoverySe return nil } -func (xdsS *xdsServer) DeltaAggregatedResources(adsgrpc.AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error { +func (xdsS *xdsServerV3) DeltaAggregatedResources(v3discoverygrpc.AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error { 
return status.Error(codes.Unimplemented, "") } -type lrsServer struct { - reqChan *testutils.Channel - respChan chan *Response +type lrsServerV3 struct { + reqChan *testutils.Channel + respChan chan *Response + streamOpenChan *testutils.Channel + streamCloseChan *testutils.Channel } -func (lrsS *lrsServer) StreamLoadStats(s lrsgrpc.LoadReportingService_StreamLoadStatsServer) error { +func (lrsS *lrsServerV3) StreamLoadStats(s v3lrsgrpc.LoadReportingService_StreamLoadStatsServer) error { + lrsS.streamOpenChan.Send(nil) + defer lrsS.streamCloseChan.Send(nil) + req, err := s.Recv() lrsS.reqChan.Send(&Request{req, err}) if err != nil { @@ -214,7 +234,7 @@ func (lrsS *lrsServer) StreamLoadStats(s lrsgrpc.LoadReportingService_StreamLoad if r.Err != nil { return r.Err } - if err := s.Send(r.Resp.(*lrspb.LoadStatsResponse)); err != nil { + if err := s.Send(r.Resp.(*v3lrspb.LoadStatsResponse)); err != nil { return err } case <-s.Context().Done(): diff --git a/internal/transport/controlbuf.go b/internal/transport/controlbuf.go index 40ef23923fda..be5a9c81eb97 100644 --- a/internal/transport/controlbuf.go +++ b/internal/transport/controlbuf.go @@ -20,13 +20,19 @@ package transport import ( "bytes" + "errors" "fmt" + "net" "runtime" + "strconv" "sync" "sync/atomic" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" ) var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { @@ -128,6 +134,16 @@ type cleanupStream struct { func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM +type earlyAbortStream struct { + httpStatus uint32 + streamID uint32 + contentSubtype string + status *status.Status + rst bool +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + type dataFrame struct { streamID uint32 endStream bool @@ -177,7 +193,7 @@ type goAway struct { code http2.ErrCode 
debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -195,6 +211,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -284,7 +308,7 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // *chan struct{} + trfChan atomic.Value // chan struct{} } func newControlBuffer(done <-chan struct{}) *controlBuffer { @@ -298,10 +322,10 @@ func newControlBuffer(done <-chan struct{}) *controlBuffer { // throttle blocks if there are too many incomingSettings/cleanupStreams in the // controlbuf. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(*chan struct{}) + ch, _ := c.trfChan.Load().(chan struct{}) if ch != nil { select { - case <-*ch: + case <-ch: case <-c.done: } } @@ -335,8 +359,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. 
- ch := make(chan struct{}) - c.trfChan.Store(&ch) + c.trfChan.Store(make(chan struct{})) } } c.mu.Unlock() @@ -377,9 +400,9 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are removing the frame that put us over the // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(*chan struct{}) - close(*ch) - c.trfChan.Store((*chan struct{})(nil)) + ch := c.trfChan.Load().(chan struct{}) + close(ch) + c.trfChan.Store((chan struct{})(nil)) } c.transportResponseFrames-- } @@ -395,8 +418,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - c.finish() - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -420,6 +442,14 @@ func (c *controlBuffer) finish() { hdr.onOrphaned(ErrConnClosing) } } + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + close(ch) + } + c.trfChan.Store((chan struct{})(nil)) c.mu.Unlock() } @@ -458,12 +488,14 @@ type loopyWriter struct { hEnc *hpack.Encoder // HPACK encoder. 
bdpEst *bdpEstimator draining bool + conn net.Conn + logger *grpclog.PrefixLogger // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -476,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hBuf: &buf, hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, + conn: conn, + logger: logger, } return l } @@ -493,23 +527,26 @@ const minBatchSize = 1000 // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the activeStreams linked-list. -// This results in writing of HTTP2 frames into an underlying write buffer. -// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. -// As an optimization, to increase the batch size for each flush, loopy yields the processor, once -// if the batch size is too low to give stream goroutines a chance to fill it up. +// frame, loopy calls processData, which processes one node from the +// activeStreams linked-list. This results in writing of HTTP2 frames into an +// underlying write buffer. When there's no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor, once if the batch +// size is too low to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes and closes the underlying connection. Otherwise, the connection is +// left open to allow the I/O error to be encountered by the reader instead. 
func (l *loopyWriter) run() (err error) { defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) } + if !isIOError(err) { + l.framer.writer.Flush() + l.conn.Close() + } + l.cbuf.finish() }() for { it, err := l.cbuf.get(true) @@ -554,7 +591,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -563,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return nil + return } // Find the stream and update it. 
if str, ok := l.estdStreams[w.streamID]; ok { @@ -575,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return nil + return } } - return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -586,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - if err := l.applySettings(s.ss); err != nil { - return err - } + l.applySettings(s.ss) return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) error { +func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ id: h.streamID, state: empty, @@ -600,15 +633,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error { wq: h.wq, } l.estdStreams[h.streamID] = str - return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) } return nil } @@ -635,19 +667,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - str.itl.enqueue(h) - return l.originateStream(str) + return l.originateStream(str, h) } -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) - if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err - } - // Other errors(errStreamDrain) need not close transport. 
+func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. + hdr.onOrphaned(errStreamDrain) return nil } + if err := hdr.initStream(str.id); err != nil { + return err + } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } @@ -662,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) } } } @@ -701,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) error { +func (l *loopyWriter) preprocessData(df *dataFrame) { str, ok := l.estdStreams[df.streamID] if !ok { - return nil + return } // If we got data for a stream it means that // stream was originated and the headers were sent out. 
@@ -713,7 +746,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error { str.state = active l.activeStreams.enqueue(str) } - return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -724,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { o.resp <- l.sendQuota - return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -743,8 +774,35 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return err } } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("finished processing active streams while in draining mode") + } + return nil +} + +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + // In case the caller forgets to set the http status, default to 200. 
+ if eas.httpStatus == 0 { + eas.httpStatus = 200 + } + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } } return nil } @@ -753,7 +811,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + // Flush and close the connection; we are done with it. + return errors.New("received GOAWAY with no active streams") } } return nil @@ -774,7 +833,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: - return l.incomingWindowUpdateHandler(i) + l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -784,25 +843,32 @@ func (l *loopyWriter) handle(i interface{}) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - return l.registerStreamHandler(i) + l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - return l.preprocessData(i) + l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - return l.outFlowControlSizeRequestHandler(i) + l.outFlowControlSizeRequestHandler(i) + case closeConnection: + // Just return a non-I/O error and run() will flush 
and close the + // connection. + return ErrConnClosing default: return fmt.Errorf("transport: unknown control message type %T", i) } + return nil } -func (l *loopyWriter) applySettings(ss []http2.Setting) error { +func (l *loopyWriter) applySettings(ss []http2.Setting) { for _, s := range ss { switch s.ID { case http2.SettingInitialWindowSize: @@ -821,7 +887,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error { updateHeaderTblSize(l.hEnc, s.Val) } } - return nil } // processData removes the first stream from active streams, writes out at most 16KB @@ -838,9 +903,9 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possilbe HTTP2 frame size. + // maximum possible HTTP2 frame size. 
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true @@ -855,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) { return false, err } if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { - return false, nil + return false, err } } else { l.activeStreams.enqueue(str) diff --git a/internal/transport/defaults.go b/internal/transport/defaults.go index 9fa306b2e07a..bc8ee0747496 100644 --- a/internal/transport/defaults.go +++ b/internal/transport/defaults.go @@ -47,3 +47,9 @@ const ( defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and a new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. +var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/internal/transport/flowcontrol.go b/internal/transport/flowcontrol.go index f262edd8ecda..97198c515889 100644 --- a/internal/transport/flowcontrol.go +++ b/internal/transport/flowcontrol.go @@ -136,12 +136,10 @@ type inFlow struct { // newLimit updates the inflow window to a new value n. // It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) uint32 { +func (f *inFlow) newLimit(n uint32) { f.mu.Lock() - d := n - f.limit f.limit = n f.mu.Unlock() - return d } func (f *inFlow) maybeAdjust(n uint32) uint32 { diff --git a/internal/transport/handler_server.go b/internal/transport/handler_server.go index 05d3871e628d..98f80e3fa00a 100644 --- a/internal/transport/handler_server.go +++ b/internal/transport/handler_server.go @@ -39,6 +39,7 @@ import ( "golang.org/x/net/http2" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -46,24 +47,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -75,11 +84,14 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta contentSubtype: contentSubtype, stats: stats, } + st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +109,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -138,16 +152,19 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats stats.Handler + stats []stats.Handler + logger *grpclog.PrefixLogger } -func (ht *serverHandlerTransport) Close() error { - ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil 
+func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -229,15 +246,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) if err == nil { // transport has not been closed - if ht.stats != nil { - // Note: The trailer fields are compressed with hpack after this call returns. - // No WireLength field is set here. - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -315,10 +332,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { }) if err == nil { - if ht.stats != nil { + for _, sh := range ht.stats { // Note: The header fields are compressed with hpack after this call returns. // No WireLength field is set here. 
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + sh.HandleRPC(s.Context(), &stats.OutHeader{ Header: md.Copy(), Compression: s.sendCompress, }) @@ -347,7 +364,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req @@ -370,14 +387,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) - if ht.stats != nil { - s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: ht.RemoteAddr(), Compression: s.recvCompress, } - ht.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, @@ -436,17 +453,17 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(debugData string) { panic("Drain() is not implemented") } // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. 
// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/internal/transport/handler_server_test.go b/internal/transport/handler_server_test.go index f9efdfb0716d..99ca211b323c 100644 --- a/internal/transport/handler_server_test.go +++ b/internal/transport/handler_server_test.go @@ -41,11 +41,12 @@ import ( func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { type testCase struct { - name string - req *http.Request - wantErr string - modrw func(http.ResponseWriter) http.ResponseWriter - check func(*serverHandlerTransport, *testCase) error + name string + req *http.Request + wantErr string + wantErrCode int + modrw func(http.ResponseWriter) http.ResponseWriter + check func(*serverHandlerTransport, *testCase) error } tests := []testCase{ { @@ -54,7 +55,8 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { ProtoMajor: 1, ProtoMinor: 1, }, - wantErr: "gRPC requires HTTP/2", + wantErr: "gRPC requires HTTP/2", + wantErrCode: http.StatusBadRequest, }, { name: "bad method", @@ -62,9 +64,9 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { ProtoMajor: 2, Method: "GET", Header: http.Header{}, - RequestURI: "/", }, - wantErr: "invalid gRPC request method", + wantErr: `invalid gRPC request method "GET"`, + wantErrCode: http.StatusBadRequest, }, { name: "bad content type", @@ -74,9 +76,9 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { Header: http.Header{ "Content-Type": {"application/foo"}, }, - RequestURI: "/service/foo.bar", }, - wantErr: "invalid gRPC request content-type", + wantErr: `invalid gRPC request content-type "application/foo"`, + wantErrCode: 
http.StatusUnsupportedMediaType, }, { name: "not flusher", @@ -86,7 +88,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { Header: http.Header{ "Content-Type": {"application/grpc"}, }, - RequestURI: "/service/foo.bar", }, modrw: func(w http.ResponseWriter) http.ResponseWriter { // Return w without its Flush method @@ -96,7 +97,8 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { } return struct{ onlyCloseNotifier }{w.(onlyCloseNotifier)} }, - wantErr: "gRPC requires a ResponseWriter supporting http.Flusher", + wantErr: "gRPC requires a ResponseWriter supporting http.Flusher", + wantErrCode: http.StatusInternalServerError, }, { name: "valid", @@ -109,7 +111,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", }, check: func(t *serverHandlerTransport, tt *testCase) error { if t.req != tt.req { @@ -133,7 +134,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", }, check: func(t *serverHandlerTransport, tt *testCase) error { if !t.timeoutSet { @@ -157,9 +157,9 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", }, - wantErr: `rpc error: code = Internal desc = malformed time-out: transport: timeout unit is not recognized: "tomorrow"`, + wantErr: `rpc error: code = Internal desc = malformed grpc-timeout: transport: timeout unit is not recognized: "tomorrow"`, + wantErrCode: http.StatusBadRequest, }, { name: "with metadata", @@ -175,7 +175,6 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", }, check: func(ht *serverHandlerTransport, tt *testCase) error { want := metadata.MD{ @@ -194,7 +193,12 @@ func (s) 
TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { } for _, tt := range tests { - rw := newTestHandlerResponseWriter() + rrec := httptest.NewRecorder() + rw := http.ResponseWriter(testHandlerResponseWriter{ + ResponseRecorder: rrec, + closeNotify: make(chan bool, 1), + }) + if tt.modrw != nil { rw = tt.modrw(rw) } @@ -203,6 +207,13 @@ func (s) TestHandlerTransport_NewServerHandlerTransport(t *testing.T) { t.Errorf("%s: error = %q; want %q", tt.name, gotErr.Error(), tt.wantErr) continue } + if tt.wantErrCode == 0 { + tt.wantErrCode = http.StatusOK + } + if rrec.Code != tt.wantErrCode { + t.Errorf("%s: code = %d; want %d", tt.name, rrec.Code, tt.wantErrCode) + continue + } if gotErr != nil { continue } @@ -247,8 +258,7 @@ func newHandleStreamTest(t *testing.T) *handleStreamTest { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", - Body: bodyr, + Body: bodyr, } rw := newTestHandlerResponseWriter().(testHandlerResponseWriter) ht, err := NewServerHandlerTransport(rw, req, nil) @@ -270,31 +280,36 @@ func (s) TestHandlerTransport_HandleStreams(t *testing.T) { t.Errorf("stream method = %q; want %q", s.method, want) } - err := s.SetHeader(metadata.Pairs("custom-header", "Custom header value")) - if err != nil { + if err := s.SetHeader(metadata.Pairs("custom-header", "Custom header value")); err != nil { t.Error(err) } - err = s.SetTrailer(metadata.Pairs("custom-trailer", "Custom trailer value")) - if err != nil { + + if err := s.SetTrailer(metadata.Pairs("custom-trailer", "Custom trailer value")); err != nil { + t.Error(err) + } + + if err := s.SetSendCompress("gzip"); err != nil { t.Error(err) } md := metadata.Pairs("custom-header", "Another custom header value") - err = s.SendHeader(md) - delete(md, "custom-header") - if err != nil { + if err := s.SendHeader(md); err != nil { t.Error(err) } + delete(md, "custom-header") - err = s.SetHeader(metadata.Pairs("too-late", "Header value that should be ignored")) - if err == nil { + if err 
:= s.SetHeader(metadata.Pairs("too-late", "Header value that should be ignored")); err == nil { t.Error("expected SetHeader call after SendHeader to fail") } - err = s.SendHeader(metadata.Pairs("too-late", "This header value should be ignored as well")) - if err == nil { + + if err := s.SendHeader(metadata.Pairs("too-late", "This header value should be ignored as well")); err == nil { t.Error("expected second SendHeader call to fail") } + if err := s.SetSendCompress("snappy"); err == nil { + t.Error("expected second SetSendCompress call to fail") + } + st.bodyw.Close() // no body st.ht.WriteStatus(s, status.New(codes.OK, "")) } @@ -307,6 +322,7 @@ func (s) TestHandlerTransport_HandleStreams(t *testing.T) { "Content-Type": {"application/grpc"}, "Trailer": {"Grpc-Status", "Grpc-Message", "Grpc-Status-Details-Bin"}, "Custom-Header": {"Custom header value", "Another custom header value"}, + "Grpc-Encoding": {"gzip"}, } wantTrailer := http.Header{ "Grpc-Status": {"0"}, @@ -359,8 +375,7 @@ func (s) TestHandlerTransport_HandleStreams_Timeout(t *testing.T) { URL: &url.URL{ Path: "/service/foo.bar", }, - RequestURI: "/service/foo.bar", - Body: bodyr, + Body: bodyr, } rw := newTestHandlerResponseWriter().(testHandlerResponseWriter) ht, err := NewServerHandlerTransport(rw, req, nil) @@ -487,6 +502,15 @@ func (s) TestHandlerTransport_HandleStreams_ErrDetails(t *testing.T) { checkHeaderAndTrailer(t, hst.rw, wantHeader, wantTrailer) } +// TestHandlerTransport_Drain verifies that Drain() is not implemented +// by `serverHandlerTransport`. +func (s) TestHandlerTransport_Drain(t *testing.T) { + defer func() { recover() }() + st := newHandleStreamTest(t) + st.ht.Drain("whatever") + t.Errorf("serverHandlerTransport.Drain() should have panicked") +} + // checkHeaderAndTrailer checks that the resulting header and trailer matches the expectation. 
func checkHeaderAndTrailer(t *testing.T, rw testHandlerResponseWriter, wantHeader, wantTrailer http.Header) { // For trailer-only responses, the trailer values might be reported as part of the Header. They will however diff --git a/internal/transport/http2_client.go b/internal/transport/http2_client.go index d5bbe720db54..326bf0848000 100644 --- a/internal/transport/http2_client.go +++ b/internal/transport/http2_client.go @@ -24,6 +24,8 @@ import ( "io" "math" "net" + "net/http" + "path/filepath" "strconv" "strings" "sync" @@ -32,15 +34,17 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/internal/grpcutil" - imetadata "google.golang.org/grpc/internal/metadata" - "google.golang.org/grpc/internal/transport/networktype" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -56,11 +60,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. 
+ userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -77,6 +85,7 @@ type http2Client struct { framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. controlBuf *controlBuffer fc *trInFlow // The scheme used: https if TLS is on, http otherwise. @@ -89,7 +98,7 @@ type http2Client struct { kp keepalive.ClientParameters keepaliveEnabled bool - statsHandler stats.Handler + statsHandlers []stats.Handler initialWindowSize int32 @@ -97,17 +106,15 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string + // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables state transportState activeStreams map[uint32]*Stream @@ -116,6 +123,9 @@ type http2Client struct { // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason + // goAwayDebugMessage contains a detailed human readable string about a + // GoAway frame, useful for error messages. + goAwayDebugMessage string // A condition variable used to signal when the keepalive goroutine should // go dormant. The condition for dormancy is based on the number of active // streams and the `PermitWithoutStream` keepalive client parameter. 
And @@ -128,28 +138,35 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData - onGoAway func(GoAwayReason) - onClose func() + onClose func(GoAwayReason) bufferPool *bufferPool connectionID uint64 + logger *grpclog.PrefixLogger } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { address := addr.Addr networkType, ok := networktype.Get(addr) if fn != nil { + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, users used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. if networkType == "unix" && !strings.HasPrefix(address, "\x00") { - // For backward compatibility, if the user dialed "unix:///path", - // the passthrough resolver would be used and the user's custom - // dialer would see "unix:///path". Since the unix resolver is used - // and the address is now "/path", prepend "unix://" so the user's - // custom dialer sees the same address. - return fn(ctx, "unix://"+address) + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) } return fn(ctx, address) } @@ -181,7 +198,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -190,19 +207,51 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }() + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. 
+ ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("Aborting due to connect deadline expiring: %v", err) + } + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. if kp.Time == 0 { @@ -234,12 +283,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - // gRPC, resolver, balancer etc. can specify arbitrary data in the - // Attributes field of resolver.Address, which is shoved into connectCtx - // and passed to the credential handshaker. This makes it possible for - // address specific arbitrary data to reach the credential handshaker. - contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) - connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) @@ -278,6 +321,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. 
cancel: cancel, userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -292,19 +337,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, - statsHandler: opts.StatsHandler, + statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), - onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, } + t.logger = prefixLoggerForClientTransport(t) + // Add peer information to the http2client context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) if md, ok := addr.Metadata.(*metadata.MD); ok { t.md = *md @@ -322,37 +368,51 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts updateFlowControl: t.updateFlowControl, } } - if t.statsHandler != nil { - t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. 
Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + return nil, err } if n != len(clientPreface) { - t.Close() - return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + return nil, err } var ss []http2.Setting @@ -370,14 +430,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } err = t.framer.fr.WriteSettings(ss...) if err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + return nil, err } // Adjust the connection flow control window if needed. 
if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { - t.Close() - return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + return nil, err } } @@ -387,18 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) - err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } - } - // If it's a connection error, let reader goroutine handle it - // since there might be data in the buffers. - if _, ok := err.(net.Error); !ok { - t.conn.Close() - } + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy.run() close(t.writerDone) }() return t, nil @@ -444,7 +494,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ Addr: t.remoteAddr, - AuthInfo: t.authInfo, + AuthInfo: t.authInfo, // Can be nil } } @@ -454,7 +504,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) Method: callHdr.Method, AuthInfo: t.authInfo, } - ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err @@ -480,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if 
callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. @@ -562,11 +625,15 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } - return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. 
@@ -591,7 +658,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -603,26 +677,46 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } -// PerformedIOError wraps an error to indicate IO may have been performed -// before the error occurred. -type PerformedIOError struct { +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { Err error + + AllowTransparentRetry bool } -// Error implements error. 
-func (p PerformedIOError) Error() string { - return p.Err.Error() +func (e NewStreamError) Error() string { + return e.Err.Error() } // NewStream creates a stream and registers it into the transport as "active" -// streams. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - // We may have performed I/O in the per-RPC creds callback, so do not - // allow transparent retry. - return nil, PerformedIOError{err} + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -644,17 +738,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea endStream: false, initStream: func(id uint32) error { t.mu.Lock() - if state := t.state; state != reachable { + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { t.mu.Unlock() - // Do a quick cleanup. 
- err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return err + cleanup(ErrConnClosing) + return ErrConnClosing } - t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) @@ -671,6 +761,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } firstTry := true var ch chan struct{} + transportDrainRequired := false checkForStreamQuota := func(it interface{}) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { @@ -686,8 +777,20 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea h := it.(*headerFrame) h.streamID = t.nextID t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -713,52 +816,56 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } for { success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { - if !checkForStreamQuota(it) { - return false - } - if !checkForHeaderListSize(it) { - return false - } - return true + return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { - return nil, err + // Connection closed. 
+ return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} } if success { break } if hdrListSizeErr != nil { - return nil, hdrListSizeErr + return nil, &NewStreamError{Err: hdrListSizeErr} } firstTry = false select { case <-ch: - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} case <-t.goAway: - return nil, errStreamDrain + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} case <-t.ctx.Done(): - return nil, ErrConnClosing + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} } } - if t.statsHandler != nil { + if len(t.statsHandlers) != 0 { header, ok := metadata.FromOutgoingContext(ctx) if ok { header.Set("user-agent", t.userAgent) } else { header = metadata.Pairs("user-agent", t.userAgent) } - // Note: The header fields are compressed with hpack after this call returns. - // No WireLength field is set here. - outHeader := &stats.OutHeader{ - Client: true, - FullMethod: callHdr.Method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: callHdr.SendCompress, - Header: header, + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) + } + } + if transportDrainRequired { + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") } - t.statsHandler.HandleRPC(s.ctx, outHeader) + t.GracefulClose() } return s, nil } @@ -841,20 +948,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. 
This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. -func (t *http2Client) Close() error { +func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. if t.state == closing { t.mu.Unlock() - return nil + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + if t.state != draining { + t.onClose(GoAwayInvalid) } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. - t.onClose() t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -866,21 +974,30 @@ func (t *http2Client) Close() error { t.mu.Unlock() t.controlBuf.finish() t.cancel() - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + t.conn.Close() + channelz.RemoveEntry(t.channelzID) + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) } + // Notify all active streams. 
for _, s := range streams { - t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) } - if t.statsHandler != nil { + for _, sh := range t.statsHandlers { connEnd := &stats.ConnEnd{ Client: true, } - t.statsHandler.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } - return err } // GracefulClose sets the state to draining, which prevents new streams from @@ -895,11 +1012,15 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") + } + t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) @@ -959,13 +1080,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - t.mu.Lock() - for _, s := range t.activeStreams { - s.fc.newLimit(n) - } - t.mu.Unlock() updateIWS := func(interface{}) bool { t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() return true } t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) @@ -1040,7 +1161,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } // The server has closed the stream without sending trailers. Record that // the read direction is closed, and set the status appropriately. 
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) } } @@ -1056,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) } statusCode = codes.Unknown } @@ -1139,15 +1260,17 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - if logger.V(logLevel) { - logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") - } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { + // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug + // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is + // enabled by default and double the configure KEEPALIVE_TIME used for new connections + // on that channel. + logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") } id := f.LastStreamID - if id > 0 && id%2 != 1 { + if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1165,18 +1288,20 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
if id > t.prevGoAwayID { t.mu.Unlock() - t.Close() + t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) return } default: t.setGoAwayReason(f) close(t.goAway) - t.controlBuf.put(&incomingGoAway{}) + defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held. // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. - t.onGoAway(t.goAwayReason) - t.state = draining + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1184,24 +1309,35 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { if upperLimit == 0 { // This is the first GoAway Frame. upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. } + + t.prevGoAwayID = id + if len(t.activeStreams) == 0 { + t.mu.Unlock() + t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) + return + } + + streamsToClose := make([]*Stream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) - t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + if streamID > id && streamID <= upperLimit { + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } } } - t.prevGoAwayID = id - active := len(t.activeStreams) t.mu.Unlock() - if active == 0 { - t.Close() + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. 
+ for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } } // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason @@ -1211,12 +1347,17 @@ func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayTooManyPings } } + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } } -func (t *http2Client) GetGoAwayReason() GoAwayReason { +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { t.mu.Lock() defer t.mu.Unlock() - return t.goAwayReason + return t.goAwayReason, t.goAwayDebugMessage } func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { @@ -1243,35 +1384,128 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - state := &decodeState{} - // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. - state.data.isGRPC = !initialHeader - if h2code, err := state.decodeHeader(frame); err != nil { - t.closeStream(s, err, true, h2code, status.Convert(err), nil, endStream) + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. 
+ if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) return } - isHeader := false - defer func() { - if t.statsHandler != nil { - if isHeader { - inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), - Header: s.header.Copy(), - Compression: s.recvCompress, - } - t.statsHandler.HandleRPC(s.ctx, inHeader) - } else { - inTrailer := &stats.InTrailer{ - Client: true, - WireLength: int(frame.Header().Length), - Trailer: s.trailer.Copy(), - } - t.statsHandler.HandleRPC(s.ctx, inTrailer) + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + 
grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) } - }() + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. 
+ se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + isHeader := false // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { @@ -1282,9 +1516,9 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. - s.recvCompress = state.data.encoding - if len(state.data.mdata) > 0 { - s.header = state.data.mdata + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata } } else { // HEADERS frame block carries a Trailers-Only. @@ -1293,40 +1527,67 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { close(s.headerChan) } + for _, sh := range t.statsHandlers { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inTrailer) + } + } + if !endStream { return } + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } + // if client received END_STREAM from server while stream was still active, send RST_STREAM rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. 
-// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - t.Close() // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) - if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - t.Close() // this kicks off resetTransport, so must be last before return - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. for { @@ -1358,7 +1619,7 @@ func (t *http2Client) reader() { continue } else { // Transport error. 
- t.Close() + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) return } } @@ -1392,7 +1653,7 @@ func minTime(a, b time.Duration) time.Duration { return b } -// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. @@ -1417,7 +1678,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close() + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) return } t.mu.Lock() @@ -1529,3 +1790,9 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/internal/transport/http2_server.go b/internal/transport/http2_server.go index 7c6c89d4f9b2..f9606401289d 100644 --- a/internal/transport/http2_server.go +++ b/internal/transport/http2_server.go @@ -35,12 +35,16 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -52,10 +56,10 @@ import ( var ( // ErrIllegalHeaderWrite indicates that setting header is illegal because of // the stream's state. 
- ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. - ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") ) // serverConnectionCounter counts the number of connections a server has seen @@ -73,7 +77,6 @@ type http2Server struct { writerDone chan struct{} // sync point to enable testing. remoteAddr net.Addr localAddr net.Addr - maxStreamID uint32 // max stream ID ever seen authInfo credentials.AuthInfo // auth info about the connection inTapHandle tap.ServerInHandle framer *framer @@ -83,7 +86,7 @@ type http2Server struct { // updates, reset streams, and various settings) to the controller. controlBuf *controlBuffer fc *trInFlow - stats stats.Handler + stats []stats.Handler // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters // Keepalive enforcement policy. @@ -102,13 +105,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when drain(...) is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. 
Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -118,16 +121,44 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger } -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. 
io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } writeBufSize := config.WriteBufferSize readBufSize := config.ReadBufferSize maxHeaderListSize := defaultServerMaxHeaderListSize @@ -206,18 +237,24 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } + done := make(chan struct{}) t := &http2Server{ - ctx: context.Background(), + ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), - authInfo: config.AuthInfo, + authInfo: authInfo, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), @@ -226,7 +263,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err fc: &trInFlow{limit: uint32(icwz)}, state: reachable, activeStreams: make(map[uint32]*Stream), - stats: config.StatsHandler, + stats: config.StatsHandlers, kp: kp, idle: time.Now(), kep: kep, @@ -234,6 +271,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err czData: new(channelzData), bufferPool: newBufferPool(), } + t.logger = prefixLoggerForServerTransport(t) + // Add peer information to the http2server context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ @@ -241,31 +282,39 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err updateFlowControl: t.updateFlowControl, } } - if t.stats != nil { - t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, }) connBegin := &stats.ConnBegin{} - t.stats.HandleConn(t.ctx, connBegin) + sh.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { if err != nil { - t.Close() + t.Close(err) } }() // Check the validity of client preface. preface := make([]byte, len(clientPreface)) if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. 
+ if err == io.EOF { + return nil, io.EOF + } return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) } if !bytes.Equal(preface, clientPreface) { @@ -287,100 +336,206 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } - } - t.conn.Close() + t.loopy.run() close(t.writerDone) }() go t.keepalive() return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + streamID := frame.Header().StreamID - state := &decodeState{ - serverSide: true, - } - if h2code, err := state.decodeHeader(frame); err != nil { - if _, ok := status.FromError(err); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: h2code, - onWrite: func() {}, - }) - } - return false + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. 
+ if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) + return nil + } + + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } + t.maxStreamID = streamID buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, - recvCompress: state.data.encoding, - method: state.data.method, - contentSubtype: state.data.contentSubtype, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + var ( + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(metadata.MD, len(frame.Fields)) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + contentType = hf.Value + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) + } + // 
"Transports must consider requests containing the Connection header + // as malformed." - A41 + case "connection": + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") + } + protocolError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. 
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return nil } + + if protocolError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + if frame.StreamEnded() { // s is just created by the caller. No lock needed. 
s.state = streamReadDone } - if state.data.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) } else { s.ctx, s.cancel = context.WithCancel(t.ctx) } - pr := &peer.Peer{ - Addr: t.remoteAddr, - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. - if len(state.data.mdata) > 0 { - s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) - } - if state.data.statsTags != nil { - s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) - } - if state.data.statsTrace != nil { - s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) - } - if t.inTapHandle != nil { - var err error - info := &tap.Info{ - FullMethodName: state.data.method, + if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) } - s.ctx, err = t.inTapHandle(s.ctx, info) - if err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) - } - t.controlBuf.put(&cleanupStream{ - streamID: s.id, - rst: true, - rstCode: http2.ErrCodeRefusedStream, - onWrite: func() {}, - }) - s.cancel() - return false + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) } } t.mu.Lock() if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -391,31 +546,44 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } - if streamID%2 != 1 || streamID <= t.maxStreamID { + if httpMethod != 
http.MethodPost { t.mu.Unlock() - // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) s.cancel() - return true + return nil } - t.maxStreamID = streamID - if state.data.httpMethod != http.MethodPost { - t.mu.Unlock() - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", state.data.httpMethod) + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + t.mu.Unlock() + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + rst: !frame.StreamEnded(), + }) + return nil } - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: http2.ErrCodeProtocol, - onWrite: func() {}, - }) - s.cancel() - return false } t.activeStreams[streamID] = s if len(t.activeStreams) == 1 { @@ -430,17 +598,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( t.adjustWindow(s, uint32(n)) } s.ctx = traceCtx(s.ctx, s.method) - if t.stats != nil { - s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) 
inHeader := &stats.InHeader{ FullMethod: s.method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(state.data.mdata).Copy(), + Header: mdata.Copy(), } - t.stats.HandleRPC(s.ctx, inHeader) + sh.HandleRPC(s.ctx, inHeader) } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) @@ -461,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -475,8 +643,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) } t.mu.Lock() s := t.activeStreams[se.StreamID] @@ -494,19 +662,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -522,8 +687,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. 
default: - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) } } } @@ -650,7 +815,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) { s.write(recvMsg{buffer: buffer}) } } - if f.Header().Flags.Has(http2.FlagDataEndStream) { + if f.StreamEnded() { // Received the end of stream from the client. s.compareAndSwapState(streamActive, streamReadDone) s.write(recvMsg{err: io.EOF}) @@ -707,8 +872,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -750,10 +915,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -785,8 +947,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if logger.V(logLevel) { - logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if t.logger.V(logLevel) { + t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } @@ -794,12 +956,27 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true 
} +func (t *http2Server) streamContextErr(s *Stream) error { + select { + case <-t.done: + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) +} + // WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - if s.updateHeaderSent() || s.getState() == streamDone { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { + return t.streamContextErr(s) + } + + if s.updateHeaderSent() { return ErrIllegalHeaderWrite } - s.hdrMu.Lock() + if md.Len() > 0 { if s.header.Len() > 0 { s.header = metadata.Join(s.header, md) @@ -808,10 +985,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() - return err + return status.Convert(err).Err() } - s.hdrMu.Unlock() return nil } @@ -842,14 +1017,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { t.closeStream(s, true, http2.ErrCodeInternal, false) return ErrHeaderListSizeLimitViolation } - if t.stats != nil { + for _, sh := range t.stats { // Note: Headers are compressed with hpack after this call returns. // No WireLength field is set here. outHeader := &stats.OutHeader{ Header: s.header.Copy(), Compression: s.sendCompress, } - t.stats.HandleRPC(s.Context(), outHeader) + sh.HandleRPC(s.Context(), outHeader) } return nil } @@ -859,17 +1034,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if s.getState() == streamDone { return nil } - s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. 
headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. if err := t.writeHeaderLocked(s); err != nil { - s.hdrMu.Unlock() return err } } else { // Send a trailer only response. @@ -884,7 +1061,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -898,7 +1075,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { endStream: true, onWrite: t.setResetPingStrikes, } - s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) if !success { if err != nil { @@ -910,10 +1087,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Send a RST_STREAM after the trailers if the client has not already half-closed. rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) - if t.stats != nil { + for _, sh := range t.stats { // Note: The trailer fields are compressed with hpack after this call returns. // No WireLength field is set here. - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + sh.HandleRPC(s.Context(), &stats.OutTrailer{ Trailer: s.trailer.Copy(), }) } @@ -925,23 +1102,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { if !s.isHeaderSent() { // Headers haven't been written yet. 
if err := t.WriteHeader(s, nil); err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return status.Errorf(codes.Internal, "transport: %v", err) + return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { - // TODO(mmukhi, dfawley): Should the server write also return io.EOF? - s.cancel() - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } } df := &dataFrame{ @@ -951,12 +1117,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e onEachWrite: t.setResetPingStrikes, } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { - select { - case <-t.done: - return ErrConnClosing - default: - } - return ContextErr(s.ctx.Err()) + return t.streamContextErr(s) } return t.controlBuf.put(df) } @@ -1005,20 +1166,20 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.drain(http2.ErrCodeNo, []byte{}) + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: // Close the connection after grace period. 
- if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to maximum connection age.") + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1034,10 +1195,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1064,11 +1222,14 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() error { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return errors.New("transport: Close() was already called") + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) } t.state = closing streams := t.activeStreams @@ -1076,27 +1237,22 @@ func (t *http2Server) Close() error { t.mu.Unlock() t.controlBuf.finish() close(t.done) - err := t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() } - if t.stats != nil { + for _, sh := range t.stats { connEnd := &stats.ConnEnd{} - t.stats.HandleConn(t.ctx, connEnd) + sh.HandleConn(t.ctx, connEnd) } - return err } // deleteStream deletes the stream s from transport's active streams. 
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1118,6 +1274,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { // finishStream closes the stream and puts the trailing headerFrame into controlbuf. func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + oldState := s.swapState(streamDone) if oldState == streamDone { // If the stream was already done, return. @@ -1137,6 +1298,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h // closeStream clears the footprint of a stream when the stream is not needed any more. func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
+ s.cancel() + s.swapState(streamDone) t.deleteStream(s, eosReceived) @@ -1152,18 +1318,14 @@ func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Server) Drain() { - t.drain(http2.ErrCodeNo, []byte{}) -} - -func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) + t.drainEvent = grpcsync.NewEvent() + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1171,39 +1333,41 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} // Handles outgoing GoAway and returns true if loopy needs to put itself // in draining mode. func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() t.mu.Lock() if t.state == closing { // TODO(mmukhi): This seems unnecessary. t.mu.Unlock() + t.maxStreamMu.Unlock() // The transport is closing. return false, ErrConnClosing } - sid := t.maxStreamID if !g.headsUp { // Stop accepting more streams now. t.state = draining + sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() + t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. 
- t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + if retErr != nil { + return false, retErr } return true, nil } t.mu.Unlock() + t.maxStreamMu.Unlock() // For a graceful close, send out a GoAway with stream ID of MaxUInt32, // Follow that with a ping and wait for the ack to come back or a timer // to expire. During this time accept new streams since they might have // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { @@ -1213,7 +1377,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return @@ -1272,6 +1436,13 @@ func (t *http2Server) getOutFlowWindow() int64 { } } +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + func getJitter(v time.Duration) time.Duration { if v == infinity { return 0 @@ -1281,3 +1452,18 @@ func getJitter(v time.Duration) time.Duration { j := grpcrand.Int63n(2*r) - r return time.Duration(j) } + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. 
+func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/internal/transport/http_util.go b/internal/transport/http_util.go index c7dee140cf1a..19cbb18f5ab4 100644 --- a/internal/transport/http_util.go +++ b/internal/transport/http_util.go @@ -20,8 +20,8 @@ package transport import ( "bufio" - "bytes" "encoding/base64" + "errors" "fmt" "io" "math" @@ -38,22 +38,14 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) const ( // http2MaxFrameLen specifies the max length of a HTTP2 frame. http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues + // https://httpwg.org/specs/rfc7540.html#SettingValues http2InitHeaderTableSize = 4096 - // baseContentType is the base content-type for gRPC. This is a valid - // content-type on it's own, but can also include a content-subtype such as - // "proto" as a suffix after "+" or ";". See - // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests - // for more details. - ) var ( @@ -93,56 +85,8 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } - logger = grpclog.Component("transport") ) -type parsedHeaderData struct { - encoding string - // statusGen caches the stream status received from the trailer the server - // sent. Client side only. Do not access directly. After all trailers are - // parsed, use the status method to retrieve the status. - statusGen *status.Status - // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not - // intended for direct access outside of parsing. - rawStatusCode *int - rawStatusMsg string - httpStatus *int - // Server side only fields. 
- timeoutSet bool - timeout time.Duration - method string - httpMethod string - // key-value metadata map from the peer. - mdata map[string][]string - statsTags []byte - statsTrace []byte - contentSubtype string - - // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP). - // - // We are in gRPC mode (peer speaking gRPC) if: - // * We are client side and have already received a HEADER frame that indicates gRPC peer. - // * The header contains valid a content-type, i.e. a string starts with "application/grpc" - // And we should handle error specific to gRPC. - // - // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we - // are in HTTP fallback mode, and should handle error specific to HTTP. - isGRPC bool - grpcErr error - httpErr error - contentTypeErr string -} - -// decodeState configures decoding criteria and records the decoded data. -type decodeState struct { - // whether decoding on server side or not - serverSide bool - - // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS - // frame once decodeHeader function has been invoked and returned. - data parsedHeaderData -} - // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -180,14 +124,6 @@ func isWhitelistedHeader(hdr string) bool { } } -func (d *decodeState) status() *status.Status { - if d.data.statusGen == nil { - // No status-details were provided; generate status using code/msg. 
- d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) - } - return d.data.statusGen -} - const binHdrSuffix = "-bin" func encodeBinHeader(v []byte) string { @@ -217,168 +153,16 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) (http2.ErrCode, error) { - // frame.Truncated is set to true when framer detects that the current header - // list size hits MaxHeaderListSize limit. - if frame.Truncated { - return http2.ErrCodeFrameSize, status.Error(codes.Internal, "peer header list size exceeded limit") - } - - for _, hf := range frame.Fields { - d.processHeaderField(hf) - } - - if d.data.isGRPC { - if d.data.grpcErr != nil { - return http2.ErrCodeProtocol, d.data.grpcErr - } - if d.serverSide { - return http2.ErrCodeNo, nil - } - if d.data.rawStatusCode == nil && d.data.statusGen == nil { - // gRPC status doesn't exist. - // Set rawStatusCode to be unknown and return nil error. - // So that, if the stream has ended this Unknown status - // will be propagated to the user. - // Otherwise, it will be ignored. In which case, status from - // a later trailer, that has StreamEnded flag set, is propagated. - code := int(codes.Unknown) - d.data.rawStatusCode = &code - } - return http2.ErrCodeNo, nil - } - - // HTTP fallback mode - if d.data.httpErr != nil { - return http2.ErrCodeProtocol, d.data.httpErr - } - - var ( - code = codes.Internal // when header does not include HTTP status, return INTERNAL - ok bool - ) - - if d.data.httpStatus != nil { - code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] - if !ok { - code = codes.Unknown - } - } - - return http2.ErrCodeProtocol, status.Error(code, d.constructHTTPErrMsg()) -} - -// constructErrMsg constructs error message to be returned in HTTP fallback mode. -// Format: HTTP status code and its corresponding message + content-type error message. 
-func (d *decodeState) constructHTTPErrMsg() string { - var errMsgs []string - - if d.data.httpStatus == nil { - errMsgs = append(errMsgs, "malformed header: missing HTTP status") - } else { - errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) - } - - if d.data.contentTypeErr == "" { - errMsgs = append(errMsgs, "transport: missing content-type field") - } else { - errMsgs = append(errMsgs, d.data.contentTypeErr) - } - - return strings.Join(errMsgs, "; ") -} - -func (d *decodeState) addMetadata(k, v string) { - if d.data.mdata == nil { - d.data.mdata = make(map[string][]string) +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err } - d.data.mdata[k] = append(d.data.mdata[k], v) -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value) - if !validContentType { - d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) - return - } - d.data.contentSubtype = contentSubtype - // TODO: do we want to propagate the whole content-type in the metadata, - // or come up with a way to just propagate the content-subtype if it was set? - // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} - // in the metadata? 
- d.addMetadata(f.Name, f.Value) - d.data.isGRPC = true - case "grpc-encoding": - d.data.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) - return - } - d.data.rawStatusCode = &code - case "grpc-message": - d.data.rawStatusMsg = decodeGrpcMessage(f.Value) - case "grpc-status-details-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - s := &spb.Status{} - if err := proto.Unmarshal(v, s); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) - return - } - d.data.statusGen = status.FromProto(s) - case "grpc-timeout": - d.data.timeoutSet = true - var err error - if d.data.timeout, err = decodeTimeout(f.Value); err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) - } - case ":path": - d.data.method = f.Value - case ":status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) - return - } - d.data.httpStatus = &code - case "grpc-tags-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) - return - } - d.data.statsTags = v - d.addMetadata(f.Name, string(v)) - case "grpc-trace-bin": - v, err := decodeBinHeader(f.Value) - if err != nil { - d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) - return - } - d.data.statsTrace = v - d.addMetadata(f.Name, string(v)) - case ":method": - d.data.httpMethod = f.Value - default: - if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { - break - } - v, err := decodeMetadataHeader(f.Name, f.Value) - if err 
!= nil { - if logger.V(logLevel) { - logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) - } - return - } - d.addMetadata(f.Name, v) + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err } + return status.FromProto(st), nil } type timeoutUnit uint8 @@ -465,13 +249,13 @@ func encodeGrpcMessage(msg string) string { } func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder for len(msg) > 0 { r, size := utf8.DecodeRuneInString(msg) for _, b := range []byte(string(r)) { if size > 1 { // If size > 1, r is not ascii. Always do percent encoding. - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) continue } @@ -480,14 +264,14 @@ func encodeGrpcMessageUnchecked(msg string) string { // // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". if b >= spaceByte && b <= tildeByte && b != percentByte { - buf.WriteByte(b) + sb.WriteByte(b) } else { - buf.WriteString(fmt.Sprintf("%%%02X", b)) + fmt.Fprintf(&sb, "%%%02X", b) } } msg = msg[size:] } - return buf.String() + return sb.String() } // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. 
@@ -505,23 +289,23 @@ func decodeGrpcMessage(msg string) string { } func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer + var sb strings.Builder lenMsg := len(msg) for i := 0; i < lenMsg; i++ { c := msg[i] if c == percentByte && i+2 < lenMsg { parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) if err != nil { - buf.WriteByte(c) + sb.WriteByte(c) } else { - buf.WriteByte(byte(parsed)) + sb.WriteByte(byte(parsed)) i += 2 } } else { - buf.WriteByte(c) + sb.WriteByte(c) } } - return buf.String() + return sb.String() } type bufWriter struct { @@ -530,8 +314,6 @@ type bufWriter struct { batchSize int conn net.Conn err error - - onFlush func() } func newBufWriter(conn net.Conn, batchSize int) *bufWriter { @@ -547,7 +329,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - return w.conn.Write(b) + n, err = w.conn.Write(b) + return n, toIOError(err) } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) @@ -568,14 +351,31 @@ func (w *bufWriter) Flush() error { if w.offset == 0 { return nil } - if w.onFlush != nil { - w.onFlush() - } _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) w.offset = 0 return w.err } +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + type framer struct { writer *bufWriter fr *http2.Framer diff --git a/internal/transport/http_util_test.go b/internal/transport/http_util_test.go index 2205050acea0..cc7807670b62 100644 --- a/internal/transport/http_util_test.go +++ b/internal/transport/http_util_test.go @@ -23,9 +23,6 @@ import ( "reflect" "testing" "time" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" ) func (s) TestTimeoutDecode(t *testing.T) { @@ -189,68 +186,6 @@ func (s) 
TestDecodeMetadataHeader(t *testing.T) { } } -func (s) TestDecodeHeaderH2ErrCode(t *testing.T) { - for _, test := range []struct { - name string - // input - metaHeaderFrame *http2.MetaHeadersFrame - serverSide bool - // output - wantCode http2.ErrCode - }{ - { - name: "valid header", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: []hpack.HeaderField{ - {Name: "content-type", Value: "application/grpc"}, - }}, - wantCode: http2.ErrCodeNo, - }, - { - name: "valid header serverSide", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: []hpack.HeaderField{ - {Name: "content-type", Value: "application/grpc"}, - }}, - serverSide: true, - wantCode: http2.ErrCodeNo, - }, - { - name: "invalid grpc status header field", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: []hpack.HeaderField{ - {Name: "content-type", Value: "application/grpc"}, - {Name: "grpc-status", Value: "xxxx"}, - }}, - wantCode: http2.ErrCodeProtocol, - }, - { - name: "invalid http content type", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: []hpack.HeaderField{ - {Name: "content-type", Value: "application/json"}, - }}, - wantCode: http2.ErrCodeProtocol, - }, - { - name: "http fallback and invalid http status", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: []hpack.HeaderField{ - // No content type provided then fallback into handling http error. 
- {Name: ":status", Value: "xxxx"}, - }}, - wantCode: http2.ErrCodeProtocol, - }, - { - name: "http2 frame size exceeds", - metaHeaderFrame: &http2.MetaHeadersFrame{Fields: nil, Truncated: true}, - wantCode: http2.ErrCodeFrameSize, - }, - } { - t.Run(test.name, func(t *testing.T) { - state := &decodeState{serverSide: test.serverSide} - if h2code, _ := state.decodeHeader(test.metaHeaderFrame); h2code != test.wantCode { - t.Fatalf("decodeState.decodeHeader(%v) = %v, want %v", test.metaHeaderFrame, h2code, test.wantCode) - } - }) - } -} - func (s) TestParseDialTarget(t *testing.T) { for _, test := range []struct { target, wantNet, wantAddr string @@ -279,3 +214,27 @@ func (s) TestParseDialTarget(t *testing.T) { } } } + +func BenchmarkDecodeGrpcMessage(b *testing.B) { + input := "Hello, %E4%B8%96%E7%95%8C" + want := "Hello, 世界" + b.ReportAllocs() + for i := 0; i < b.N; i++ { + got := decodeGrpcMessage(input) + if got != want { + b.Fatalf("decodeGrpcMessage(%q) = %s, want %s", input, got, want) + } + } +} + +func BenchmarkEncodeGrpcMessage(b *testing.B) { + input := "Hello, 世界" + want := "Hello, %E4%B8%96%E7%95%8C" + b.ReportAllocs() + for i := 0; i < b.N; i++ { + got := encodeGrpcMessage(input) + if got != want { + b.Fatalf("encodeGrpcMessage(%q) = %s, want %s", input, got, want) + } + } +} diff --git a/internal/transport/keepalive_test.go b/internal/transport/keepalive_test.go index c8f177fecf1b..a46bcf020df8 100644 --- a/internal/transport/keepalive_test.go +++ b/internal/transport/keepalive_test.go @@ -24,14 +24,23 @@ package transport import ( "context" + "crypto/tls" + "crypto/x509" + "fmt" "io" "net" + "os" + "strings" "testing" "time" "golang.org/x/net/http2" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/testdata" ) const defaultTestTimeout = 10 * time.Second @@ -42,12 +51,12 @@ 
const defaultTestTimeout = 10 * time.Second func (s) TestMaxConnectionIdle(t *testing.T) { serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - MaxConnectionIdle: 2 * time.Second, + MaxConnectionIdle: 30 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -60,33 +69,33 @@ func (s) TestMaxConnectionIdle(t *testing.T) { } client.CloseStream(stream, io.EOF) - // Wait for the server's MaxConnectionIdle timeout to kick in, and for it - // to send a GoAway. - timeout := time.NewTimer(time.Second * 4) + // Verify the server sends a GoAway to client after MaxConnectionIdle timeout + // kicks in. select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason := client.GetGoAwayReason(); reason != GoAwayNoReason { + case <-ctx.Done(): + t.Fatalf("context expired before receiving GoAway from the server.") + case <-client.GoAway(): + reason, debugMsg := client.GetGoAwayReason() + if reason != GoAwayNoReason { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason) } - case <-timeout.C: - t.Fatalf("MaxConnectionIdle timeout expired, expected a GoAway from the server.") + if !strings.Contains(debugMsg, "max_idle") { + t.Fatalf("GoAwayDebugMessage is %v, want %v", debugMsg, "max_idle") + } } } -// TestMaxConenctionIdleBusyClient tests that a server will not send GoAway to +// TestMaxConnectionIdleBusyClient tests that a server will not send GoAway to // a busy client. 
func (s) TestMaxConnectionIdleBusyClient(t *testing.T) { serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - MaxConnectionIdle: 2 * time.Second, + MaxConnectionIdle: 100 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -98,55 +107,53 @@ func (s) TestMaxConnectionIdleBusyClient(t *testing.T) { t.Fatalf("client.NewStream() failed: %v", err) } - // Wait for double the MaxConnectionIdle time to make sure the server does - // not send a GoAway, as the client has an open stream. - timeout := time.NewTimer(time.Second * 4) + // Verify the server does not send a GoAway to client even after MaxConnectionIdle + // timeout kicks in. + ctx, cancel = context.WithTimeout(context.Background(), time.Second) + defer cancel() select { case <-client.GoAway(): - if !timeout.Stop() { - <-timeout.C - } - t.Fatalf("A non-idle client received a GoAway.") - case <-timeout.C: + t.Fatalf("A busy client received a GoAway.") + case <-ctx.Done(): } } // TestMaxConnectionAge tests that a server will send GoAway after a duration // of MaxConnectionAge. 
func (s) TestMaxConnectionAge(t *testing.T) { + maxConnAge := 100 * time.Millisecond serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - MaxConnectionAge: 1 * time.Second, - MaxConnectionAgeGrace: 1 * time.Second, + MaxConnectionAge: maxConnAge, + MaxConnectionAgeGrace: 10 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - _, err := client.NewStream(ctx, &CallHdr{}) - if err != nil { + if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { t.Fatalf("client.NewStream() failed: %v", err) } - // Wait for the server's MaxConnectionAge timeout to kick in, and for it - // to send a GoAway. - timeout := time.NewTimer(4 * time.Second) + // Verify the server sends a GoAway to client even after client remains idle + // for more than MaxConnectionIdle time. 
select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason := client.GetGoAwayReason(); reason != GoAwayNoReason { + case <-client.GoAway(): + reason, debugMsg := client.GetGoAwayReason() + if reason != GoAwayNoReason { t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayNoReason) } - case <-timeout.C: - t.Fatalf("MaxConnectionAge timeout expired, expected a GoAway from the server.") + if !strings.Contains(debugMsg, "max_age") { + t.Fatalf("GoAwayDebugMessage is %v, want %v", debugMsg, "max_age") + } + case <-ctx.Done(): + t.Fatalf("timed out before getting a GoAway from the server.") } } @@ -163,13 +170,13 @@ const ( func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) { serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, + Time: 100 * time.Millisecond, + Timeout: 10 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -192,7 +199,7 @@ func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) { // We read from the net.Conn till we get an error, which is expected when // the server closes the connection as part of the keepalive logic. - errCh := make(chan error) + errCh := make(chan error, 1) go func() { b := make([]byte, 24) for { @@ -205,15 +212,16 @@ func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) { // Server waits for KeepaliveParams.Time seconds before sending out a ping, // and then waits for KeepaliveParams.Timeout for a ping ack. 
- timeout := time.NewTimer(4 * time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() select { case err := <-errCh: if err != io.EOF { t.Fatalf("client.Read(_) = _,%v, want io.EOF", err) } - case <-timeout.C: - t.Fatalf("keepalive timeout expired, server should have closed the connection.") + case <-ctx.Done(): + t.Fatalf("Test timed out before server closed the connection.") } } @@ -222,25 +230,22 @@ func (s) TestKeepaliveServerClosesUnresponsiveClient(t *testing.T) { func (s) TestKeepaliveServerWithResponsiveClient(t *testing.T) { serverConfig := &ServerConfig{ KeepaliveParams: keepalive.ServerParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, + Time: 100 * time.Millisecond, + Timeout: 100 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() // Give keepalive logic some time by sleeping. - time.Sleep(4 * time.Second) + time.Sleep(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } @@ -251,13 +256,17 @@ func (s) TestKeepaliveServerWithResponsiveClient(t *testing.T) { // logic is running even without any active streams. 
func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) { connCh := make(chan net.Conn, 1) - client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - PermitWithoutStream: true, - }}, connCh) + copts := ConnectOptions{ + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), + KeepaliveParams: keepalive.ClientParameters{ + Time: 10 * time.Millisecond, + Timeout: 10 * time.Millisecond, + PermitWithoutStream: true, + }, + } + client, cancel := setUpWithNoPingServer(t, copts, connCh) defer cancel() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) conn, ok := <-connCh if !ok { @@ -265,14 +274,8 @@ func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) { } defer conn.Close() - // Sleep for keepalive to close the connection. - time.Sleep(4 * time.Second) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is not healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") + if err := pollForStreamCreationError(client); err != nil { + t.Fatal(err) } } @@ -283,12 +286,16 @@ func (s) TestKeepaliveClientClosesUnresponsiveServer(t *testing.T) { // active streams, and therefore the transport stays open. 
func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) { connCh := make(chan net.Conn, 1) - client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - }}, connCh) + copts := ConnectOptions{ + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), + KeepaliveParams: keepalive.ClientParameters{ + Time: 10 * time.Millisecond, + Timeout: 10 * time.Millisecond, + }, + } + client, cancel := setUpWithNoPingServer(t, copts, connCh) defer cancel() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) conn, ok := <-connCh if !ok { @@ -297,13 +304,10 @@ func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) { defer conn.Close() // Give keepalive some time. - time.Sleep(4 * time.Second) + time.Sleep(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } @@ -312,12 +316,18 @@ func (s) TestKeepaliveClientOpenWithUnresponsiveServer(t *testing.T) { // transport even when there is an active stream. 
func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) { connCh := make(chan net.Conn, 1) - client, cancel := setUpWithNoPingServer(t, ConnectOptions{KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - }}, connCh) + copts := ConnectOptions{ + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), + KeepaliveParams: keepalive.ClientParameters{ + Time: 500 * time.Millisecond, + Timeout: 500 * time.Millisecond, + }, + } + // TODO(i/6099): Setup a server which can ping and no-ping based on a flag to + // reduce the flakiness in this test. + client, cancel := setUpWithNoPingServer(t, copts, connCh) defer cancel() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) conn, ok := <-connCh if !ok { @@ -329,15 +339,11 @@ func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) { defer cancel() // Create a stream, but send no data on it. if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + t.Fatalf("Stream creation failed: %v", err) } - // Give keepalive some time. - time.Sleep(4 * time.Second) - - // Make sure the client transport is not healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") + if err := pollForStreamCreationError(client); err != nil { + t.Fatal(err) } } @@ -345,75 +351,66 @@ func (s) TestKeepaliveClientClosesWithActiveStreams(t *testing.T) { // responds to keepalive pings, and makes sure than a client transport stays // healthy without any active streams. 
func (s) TestKeepaliveClientStaysHealthyWithResponsiveServer(t *testing.T) { - server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, normal, ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 1 * time.Second, - PermitWithoutStream: true, - }}) + server, client, cancel := setUpWithOptions(t, 0, + &ServerConfig{ + KeepalivePolicy: keepalive.EnforcementPolicy{ + MinTime: 50 * time.Millisecond, + PermitWithoutStream: true, + }, + }, + normal, + ConnectOptions{ + KeepaliveParams: keepalive.ClientParameters{ + Time: 55 * time.Millisecond, + Timeout: time.Second, + PermitWithoutStream: true, + }}) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() // Give keepalive some time. - time.Sleep(4 * time.Second) + time.Sleep(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } // TestKeepaliveClientFrequency creates a server which expects at most 1 client -// ping for every 1.2 seconds, while the client is configured to send a ping -// every 1 second. So, this configuration should end up with the client +// ping for every 100 ms, while the client is configured to send a ping +// every 50 ms. So, this configuration should end up with the client // transport being closed. But we had a bug wherein the client was sending one // ping every [Time+Timeout] instead of every [Time] period, and this test // explicitly makes sure the fix works and the client sends a ping every [Time] // period. 
func (s) TestKeepaliveClientFrequency(t *testing.T) { + grpctest.TLogger.ExpectError("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\"") + serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 1200 * time.Millisecond, // 1.2 seconds + MinTime: 100 * time.Millisecond, PermitWithoutStream: true, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 1 * time.Second, - Timeout: 2 * time.Second, + Time: 50 * time.Millisecond, + Timeout: time.Second, PermitWithoutStream: true, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() - timeout := time.NewTimer(6 * time.Second) - select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings { - t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) - } - case <-timeout.C: - t.Fatalf("client transport still healthy; expected GoAway from the server.") - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is not healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") + if err := waitForGoAwayTooManyPings(client); err != nil { + t.Fatal(err) } } @@ -422,43 +419,29 @@ func (s) TestKeepaliveClientFrequency(t *testing.T) { // (when there are no active streams), based on the configured // EnforcementPolicy. 
func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) { + grpctest.TLogger.ExpectError("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\"") + serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 2 * time.Second, + MinTime: time.Second, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 50 * time.Millisecond, - Timeout: 1 * time.Second, + Time: 20 * time.Millisecond, + Timeout: 100 * time.Millisecond, PermitWithoutStream: true, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() - timeout := time.NewTimer(4 * time.Second) - select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings { - t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) - } - case <-timeout.C: - t.Fatalf("client transport still healthy; expected GoAway from the server.") - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is not healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") + if err := waitForGoAwayTooManyPings(client); err != nil { + t.Fatal(err) } } @@ -467,20 +450,22 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientNoRPC(t *testing.T) { // (even when there is an active stream), based on the configured // EnforcementPolicy. 
func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { + grpctest.TLogger.ExpectError("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\"") + serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 2 * time.Second, + MinTime: time.Second, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ Time: 50 * time.Millisecond, - Timeout: 1 * time.Second, + Timeout: 100 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() @@ -488,25 +473,11 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + t.Fatalf("Stream creation failed: %v", err) } - timeout := time.NewTimer(4 * time.Second) - select { - case <-client.Error(): - if !timeout.Stop() { - <-timeout.C - } - if reason := client.GetGoAwayReason(); reason != GoAwayTooManyPings { - t.Fatalf("GoAwayReason is %v, want %v", reason, GoAwayTooManyPings) - } - case <-timeout.C: - t.Fatalf("client transport still healthy; expected GoAway from the server.") - } - - // Make sure the client transport is not healthy. 
- if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { - t.Fatal("client.NewStream() should have failed, but succeeded") + if err := waitForGoAwayTooManyPings(client); err != nil { + t.Fatal(err) } } @@ -517,32 +488,30 @@ func (s) TestKeepaliveServerEnforcementWithAbusiveClientWithRPC(t *testing.T) { func (s) TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 100 * time.Millisecond, + MinTime: 40 * time.Millisecond, PermitWithoutStream: true, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 101 * time.Millisecond, - Timeout: 1 * time.Second, + Time: 50 * time.Millisecond, + Timeout: time.Second, PermitWithoutStream: true, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() - // Give keepalive enough time. - time.Sleep(3 * time.Second) + // Sleep for client to send ~10 keepalive pings. + time.Sleep(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + // Verify that the server does not close the client transport. 
+ if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } @@ -553,34 +522,30 @@ func (s) TestKeepaliveServerEnforcementWithObeyingClientNoRPC(t *testing.T) { func (s) TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 100 * time.Millisecond, + MinTime: 40 * time.Millisecond, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 101 * time.Millisecond, - Timeout: 1 * time.Second, + Time: 50 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } // Give keepalive enough time. - time.Sleep(3 * time.Second) + time.Sleep(500 * time.Millisecond) - // Make sure the client transport is healthy. 
- if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } @@ -593,30 +558,27 @@ func (s) TestKeepaliveServerEnforcementWithObeyingClientWithRPC(t *testing.T) { func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T) { serverConfig := &ServerConfig{ KeepalivePolicy: keepalive.EnforcementPolicy{ - MinTime: 2 * time.Second, + MinTime: 100 * time.Millisecond, }, } clientOptions := ConnectOptions{ KeepaliveParams: keepalive.ClientParameters{ - Time: 50 * time.Millisecond, - Timeout: 1 * time.Second, + Time: 10 * time.Millisecond, + Timeout: 10 * time.Millisecond, }, } server, client, cancel := setUpWithOptions(t, 0, serverConfig, normal, clientOptions) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() // No active streams on the client. Give keepalive enough time. - time.Sleep(5 * time.Second) + time.Sleep(500 * time.Millisecond) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Make sure the client transport is healthy. - if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { - t.Fatalf("client.NewStream() failed: %v", err) + if err := checkForHealthyStream(client); err != nil { + t.Fatalf("Stream creation failed: %v", err) } } @@ -624,45 +586,111 @@ func (s) TestKeepaliveServerEnforcementWithDormantKeepaliveOnClient(t *testing.T // the keepalive timeout, as detailed in proposal A18. 
func (s) TestTCPUserTimeout(t *testing.T) { tests := []struct { - time time.Duration - timeout time.Duration - wantTimeout time.Duration + tls bool + time time.Duration + timeout time.Duration + clientWantTimeout time.Duration + serverWantTimeout time.Duration }{ { + false, + 10 * time.Second, + 10 * time.Second, + 10 * 1000 * time.Millisecond, + 10 * 1000 * time.Millisecond, + }, + { + false, + 0, + 0, + 0, + 20 * 1000 * time.Millisecond, + }, + { + false, + infinity, + infinity, + 0, + 0, + }, + { + true, 10 * time.Second, 10 * time.Second, 10 * 1000 * time.Millisecond, + 10 * 1000 * time.Millisecond, }, { + true, 0, 0, 0, + 20 * 1000 * time.Millisecond, + }, + { + true, + infinity, + infinity, + 0, + 0, }, } for _, tt := range tests { + sopts := &ServerConfig{ + KeepaliveParams: keepalive.ServerParameters{ + Time: tt.time, + Timeout: tt.timeout, + }, + } + + copts := ConnectOptions{ + KeepaliveParams: keepalive.ClientParameters{ + Time: tt.time, + Timeout: tt.timeout, + }, + } + + if tt.tls { + copts.TransportCredentials = makeTLSCreds(t, "x509/client1_cert.pem", "x509/client1_key.pem", "x509/server_ca_cert.pem") + sopts.Credentials = makeTLSCreds(t, "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") + + } + server, client, cancel := setUpWithOptions( t, 0, - &ServerConfig{ - KeepaliveParams: keepalive.ServerParameters{ - Time: tt.timeout, - Timeout: tt.timeout, - }, - }, + sopts, normal, - ConnectOptions{ - KeepaliveParams: keepalive.ClientParameters{ - Time: tt.time, - Timeout: tt.timeout, - }, - }, + copts, ) defer func() { - client.Close() + client.Close(fmt.Errorf("closed manually by test")) server.stop() cancel() }() + var sc *http2Server + var srawConn net.Conn + // Wait until the server transport is setup. 
+ for { + server.mu.Lock() + if len(server.conns) == 0 { + server.mu.Unlock() + time.Sleep(time.Millisecond) + continue + } + for k := range server.conns { + var ok bool + sc, ok = k.(*http2Server) + if !ok { + t.Fatalf("Failed to convert %v to *http2Server", k) + } + srawConn = server.conns[k] + } + server.mu.Unlock() + break + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() stream, err := client.NewStream(ctx, &CallHdr{}) @@ -671,15 +699,104 @@ func (s) TestTCPUserTimeout(t *testing.T) { } client.CloseStream(stream, io.EOF) - opt, err := syscall.GetTCPUserTimeout(client.conn) + // check client TCP user timeout only when non TLS + // TODO : find a way to get the underlying conn for client when TLS + if !tt.tls { + cltOpt, err := syscall.GetTCPUserTimeout(client.conn) + if err != nil { + t.Fatalf("syscall.GetTCPUserTimeout() failed: %v", err) + } + if cltOpt < 0 { + t.Skipf("skipping test on unsupported environment") + } + if gotTimeout := time.Duration(cltOpt) * time.Millisecond; gotTimeout != tt.clientWantTimeout { + t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.clientWantTimeout) + } + } + scConn := sc.conn + if tt.tls { + if _, ok := sc.conn.(*net.TCPConn); ok { + t.Fatalf("sc.conn is should have wrapped conn with TLS") + } + scConn = srawConn + } + // verify the type of scConn (on which TCP user timeout will be got) + if _, ok := scConn.(*net.TCPConn); !ok { + t.Fatalf("server underlying conn is of type %T, want net.TCPConn", scConn) + } + srvOpt, err := syscall.GetTCPUserTimeout(scConn) if err != nil { t.Fatalf("syscall.GetTCPUserTimeout() failed: %v", err) } - if opt < 0 { - t.Skipf("skipping test on unsupported environment") + if gotTimeout := time.Duration(srvOpt) * time.Millisecond; gotTimeout != tt.serverWantTimeout { + t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.serverWantTimeout) + } + + } +} + +func makeTLSCreds(t *testing.T, certPath, keyPath, rootsPath 
string) credentials.TransportCredentials { + cert, err := tls.LoadX509KeyPair(testdata.Path(certPath), testdata.Path(keyPath)) + if err != nil { + t.Fatalf("tls.LoadX509KeyPair(%q, %q) failed: %v", certPath, keyPath, err) + } + b, err := os.ReadFile(testdata.Path(rootsPath)) + if err != nil { + t.Fatalf("os.ReadFile(%q) failed: %v", rootsPath, err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(b) { + t.Fatal("failed to append certificates") + } + return credentials.NewTLS(&tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: roots, + InsecureSkipVerify: true, + }) +} + +// checkForHealthyStream attempts to create a stream and return error if any. +// The stream created is closed right after to avoid any leakages. +func checkForHealthyStream(client *http2Client) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := client.NewStream(ctx, &CallHdr{}) + client.CloseStream(stream, err) + return err +} + +func pollForStreamCreationError(client *http2Client) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for { + if _, err := client.NewStream(ctx, &CallHdr{}); err != nil { + break } - if gotTimeout := time.Duration(opt) * time.Millisecond; gotTimeout != tt.wantTimeout { - t.Fatalf("syscall.GetTCPUserTimeout() = %d, want %d", gotTimeout, tt.wantTimeout) + time.Sleep(50 * time.Millisecond) + } + if ctx.Err() != nil { + return fmt.Errorf("test timed out before stream creation returned an error") + } + return nil +} + +// waitForGoAwayTooManyPings waits for client to receive a GoAwayTooManyPings +// from server. It also asserts that stream creation fails after receiving a +// GoAway. 
+func waitForGoAwayTooManyPings(client *http2Client) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-client.GoAway(): + if reason, _ := client.GetGoAwayReason(); reason != GoAwayTooManyPings { + return fmt.Errorf("goAwayReason is %v, want %v", reason, GoAwayTooManyPings) } + case <-ctx.Done(): + return fmt.Errorf("test timed out before getting GoAway with reason:GoAwayTooManyPings from server") + } + + if _, err := client.NewStream(ctx, &CallHdr{}); err == nil { + return fmt.Errorf("stream creation succeeded after receiving a GoAway from the server") } + return nil } diff --git a/internal/transport/logging.go b/internal/transport/logging.go new file mode 100644 index 000000000000..42ed2b07af66 --- /dev/null +++ b/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/internal/transport/networktype/networktype.go b/internal/transport/networktype/networktype.go index 96967428b515..c11b5278274f 100644 --- a/internal/transport/networktype/networktype.go +++ b/internal/transport/networktype/networktype.go @@ -17,7 +17,7 @@ */ // Package networktype declares the network type to be used in the default -// dailer. Attribute of a resolver.Address. +// dialer. Attribute of a resolver.Address. package networktype import ( @@ -31,7 +31,7 @@ const key = keyType("grpc.internal.transport.networktype") // Set returns a copy of the provided address with attributes containing networkType. 
func Set(address resolver.Address, networkType string) resolver.Address { - address.Attributes = address.Attributes.WithValues(key, networkType) + address.Attributes = address.Attributes.WithValue(key, networkType) return address } diff --git a/internal/transport/proxy.go b/internal/transport/proxy.go index a662bf39a6c8..415961987870 100644 --- a/internal/transport/proxy.go +++ b/internal/transport/proxy.go @@ -37,7 +37,7 @@ var ( httpProxyFromEnvironment = http.ProxyFromEnvironment ) -func mapAddress(ctx context.Context, address string) (*url.URL, error) { +func mapAddress(address string) (*url.URL, error) { req := &http.Request{ URL: &url.URL{ Scheme: "https", @@ -114,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // connection. func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { newAddr := addr - proxyURL, err := mapAddress(ctx, addr) + proxyURL, err := mapAddress(addr) if err != nil { return nil, err } diff --git a/internal/transport/proxy_test.go b/internal/transport/proxy_test.go index a2f1aa438546..8abee1e7b383 100644 --- a/internal/transport/proxy_test.go +++ b/internal/transport/proxy_test.go @@ -1,3 +1,4 @@ +//go:build !race // +build !race /* @@ -119,7 +120,7 @@ func testHTTPConnect(t *testing.T, proxyURLModify func(*url.URL) *url.URL, proxy msg := []byte{4, 3, 5, 2} recvBuf := make([]byte, len(msg)) - done := make(chan error) + done := make(chan error, 1) go func() { in, err := blis.Accept() if err != nil { @@ -210,11 +211,8 @@ func (s) TestMapAddressEnv(t *testing.T) { } defer overwrite(hpfe)() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // envTestAddr should be handled by ProxyFromEnvironment. 
- got, err := mapAddress(ctx, envTestAddr) + got, err := mapAddress(envTestAddr) if err != nil { t.Error(err) } diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 5cf7c5f80fe1..aa1c896595d9 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -30,9 +30,11 @@ import ( "net" "sync" "sync/atomic" + "time" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -41,6 +43,10 @@ import ( "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -251,6 +257,9 @@ type Stream struct { fc *inFlow wq *writeQuota + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) @@ -339,8 +348,24 @@ func (s *Stream) RecvCompress() string { } // SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. 
+func (s *Stream) ClientAdvertisedCompressors() string { + return s.clientAdvertisedCompressors } // Done returns a channel which is closed when it receives the final status @@ -364,9 +389,15 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() + if !s.headerValid { return nil, s.status.Err() } + + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } @@ -518,26 +549,21 @@ const ( // ServerConfig consists of all the configurations to establish a server transport. type ServerConfig struct { MaxStreams uint32 - AuthInfo credentials.AuthInfo + ConnectionTimeout time.Duration + Credentials credentials.TransportCredentials InTapHandle tap.ServerInHandle - StatsHandler stats.Handler + StatsHandlers []stats.Handler KeepaliveParams keepalive.ServerParameters KeepalivePolicy keepalive.EnforcementPolicy InitialWindowSize int32 InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { - return newHTTP2Server(conn, config) -} - // ConnectOptions covers all relevant options for communicating with the server. type ConnectOptions struct { // UserAgent is the application user agent. @@ -556,8 +582,8 @@ type ConnectOptions struct { CredsBundle credentials.Bundle // KeepaliveParams stores the keepalive parameters. KeepaliveParams keepalive.ClientParameters - // StatsHandler stores the handler for stats. - StatsHandler stats.Handler + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler // InitialWindowSize sets the initial window size for a stream. InitialWindowSize int32 // InitialConnWindowSize sets the initial window size for a connection. 
@@ -567,7 +593,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -576,8 +602,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) } // Options provides additional hints and information for message @@ -622,7 +648,7 @@ type ClientTransport interface { // Close tears down this transport. Once it returns, the transport // should not be accessed any more. The caller must make sure this // is called only once. - Close() error + Close(err error) // GracefulClose starts to tear down the transport: the transport will stop // accepting new RPCs and NewStream will return error. Once all streams are @@ -656,8 +682,9 @@ type ClientTransport interface { // HTTP/2). GoAway() <-chan struct{} - // GetGoAwayReason returns the reason why GoAway frame was received. - GetGoAwayReason() GoAwayReason + // GetGoAwayReason returns the reason why GoAway frame was received, along + // with a human readable string with debug info. 
+ GetGoAwayReason() (GoAwayReason, string) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr @@ -693,13 +720,13 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() error + Close(err error) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() @@ -744,6 +771,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/internal/transport/transport_test.go b/internal/transport/transport_test.go index 1d8d3ed355df..258ef7411cf0 100644 --- a/internal/transport/transport_test.go +++ b/internal/transport/transport_test.go @@ -27,6 +27,7 @@ import ( "io" "math" "net" + "os" "runtime" "strconv" "strings" @@ -34,12 +35,15 @@ import ( "testing" "time" + "google.golang.org/grpc/peer" + "github.com/google/go-cmp/cmp" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/attributes" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/leakcheck" "google.golang.org/grpc/internal/testutils" @@ -55,16 +59,6 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } -type server struct { - lis net.Listener - port string - startedErr chan error // error (or nil) with server start value - mu 
sync.Mutex - conns map[ServerTransport]bool - h *testStreamHandler - ready chan struct{} -} - var ( expectedRequest = []byte("ping") expectedResponse = []byte("pong") @@ -194,12 +188,12 @@ func (h *testStreamHandler) handleStreamMisbehave(t *testing.T, s *Stream) { } } -func (h *testStreamHandler) handleStreamEncodingRequiredStatus(t *testing.T, s *Stream) { +func (h *testStreamHandler) handleStreamEncodingRequiredStatus(s *Stream) { // raw newline is not accepted by http2 framer so it must be encoded. h.t.WriteStatus(s, encodingTestStatus) } -func (h *testStreamHandler) handleStreamInvalidHeaderField(t *testing.T, s *Stream) { +func (h *testStreamHandler) handleStreamInvalidHeaderField(s *Stream) { headerFields := []hpack.HeaderField{} headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: expectedInvalidHeaderField}) h.t.controlBuf.put(&headerFrame{ @@ -298,6 +292,25 @@ func (h *testStreamHandler) handleStreamDelayRead(t *testing.T, s *Stream) { } } +type server struct { + lis net.Listener + port string + startedErr chan error // error (or nil) with server start value + mu sync.Mutex + conns map[ServerTransport]net.Conn + h *testStreamHandler + ready chan struct{} + channelzID *channelz.Identifier +} + +func newTestServer() *server { + return &server{ + startedErr: make(chan error, 1), + ready: make(chan struct{}), + channelzID: channelz.NewIdentifierForTesting(channelz.RefServer, time.Now().Unix(), nil), + } +} + // start starts server. Other goroutines should block on s.readyChan for further operations. 
func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hType) { var err error @@ -316,24 +329,25 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT return } s.port = p - s.conns = make(map[ServerTransport]bool) + s.conns = make(map[ServerTransport]net.Conn) s.startedErr <- nil for { conn, err := s.lis.Accept() if err != nil { return } - transport, err := NewServerTransport("http2", conn, serverConfig) + rawConn := conn + transport, err := NewServerTransport(conn, serverConfig) if err != nil { return } s.mu.Lock() if s.conns == nil { s.mu.Unlock() - transport.Close() + transport.Close(errors.New("s.conns is nil")) return } - s.conns[transport] = true + s.conns[transport] = rawConn h := &testStreamHandler{t: transport.(*http2Server)} s.h = h s.mu.Unlock() @@ -356,13 +370,13 @@ func (s *server) start(t *testing.T, port int, serverConfig *ServerConfig, ht hT }) case encodingRequiredStatus: go transport.HandleStreams(func(s *Stream) { - go h.handleStreamEncodingRequiredStatus(t, s) + go h.handleStreamEncodingRequiredStatus(s) }, func(ctx context.Context, method string) context.Context { return ctx }) case invalidHeaderField: go transport.HandleStreams(func(s *Stream) { - go h.handleStreamInvalidHeaderField(t, s) + go h.handleStreamInvalidHeaderField(s) }, func(ctx context.Context, method string) context.Context { return ctx }) @@ -408,7 +422,7 @@ func (s *server) stop() { s.lis.Close() s.mu.Lock() for c := range s.conns { - c.Close() + c.Close(errors.New("server Stop called")) } s.conns = nil s.mu.Unlock() @@ -421,9 +435,10 @@ func (s *server) addr() string { return s.lis.Addr().String() } -func setUpServerOnly(t *testing.T, port int, serverConfig *ServerConfig, ht hType) *server { - server := &server{startedErr: make(chan error, 1), ready: make(chan struct{})} - go server.start(t, port, serverConfig, ht) +func setUpServerOnly(t *testing.T, port int, sc *ServerConfig, ht hType) *server { + server := newTestServer() 
+ sc.ChannelzParentID = server.channelzID + go server.start(t, port, sc, ht) server.wait(t, 2*time.Second) return server } @@ -432,11 +447,13 @@ func setUp(t *testing.T, port int, maxStreams uint32, ht hType) (*server, *http2 return setUpWithOptions(t, port, &ServerConfig{MaxStreams: maxStreams}, ht, ConnectOptions{}) } -func setUpWithOptions(t *testing.T, port int, serverConfig *ServerConfig, ht hType, copts ConnectOptions) (*server, *http2Client, func()) { - server := setUpServerOnly(t, port, serverConfig, ht) +func setUpWithOptions(t *testing.T, port int, sc *ServerConfig, ht hType, copts ConnectOptions) (*server, *http2Client, func()) { + server := setUpServerOnly(t, port, sc, ht) addr := resolver.Address{Addr: "localhost:" + server.port} + copts.ChannelzParentID = channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil) + connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) - ct, connErr := NewClientTransport(connectCtx, context.Background(), addr, copts, func() {}, func(GoAwayReason) {}, func() {}) + ct, connErr := NewClientTransport(connectCtx, context.Background(), addr, copts, func(GoAwayReason) {}) if connErr != nil { cancel() // Do not cancel in success path. 
t.Fatalf("failed to create transport: %v", connErr) @@ -458,10 +475,16 @@ func setUpWithNoPingServer(t *testing.T, copts ConnectOptions, connCh chan net.C close(connCh) return } + framer := http2.NewFramer(conn, conn) + if err := framer.WriteSettings(); err != nil { + t.Errorf("Error at server-side while writing settings: %v", err) + close(connCh) + return + } connCh <- conn }() connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) - tr, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func() {}, func(GoAwayReason) {}, func() {}) + tr, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) if err != nil { cancel() // Do not cancel in success path. // Server clean-up. @@ -481,7 +504,7 @@ func (s) TestInflightStreamClosing(t *testing.T) { server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer cancel() defer server.stop() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -514,6 +537,52 @@ func (s) TestInflightStreamClosing(t *testing.T) { } } +// Tests that when streamID > MaxStreamId, the current client transport drains. 
+func (s) TestClientTransportDrainsAfterStreamIDExhausted(t *testing.T) { + server, ct, cancel := setUp(t, 0, math.MaxUint32, normal) + defer cancel() + defer server.stop() + callHdr := &CallHdr{ + Host: "localhost", + Method: "foo.Small", + } + + originalMaxStreamID := MaxStreamID + MaxStreamID = 3 + defer func() { + MaxStreamID = originalMaxStreamID + }() + + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + + s, err := ct.NewStream(ctx, callHdr) + if err != nil { + t.Fatalf("ct.NewStream() = %v", err) + } + if s.id != 1 { + t.Fatalf("Stream id: %d, want: 1", s.id) + } + + if got, want := ct.stateForTesting(), reachable; got != want { + t.Fatalf("Client transport state %v, want %v", got, want) + } + + // The expected stream ID here is 3 since stream IDs are incremented by 2. + s, err = ct.NewStream(ctx, callHdr) + if err != nil { + t.Fatalf("ct.NewStream() = %v", err) + } + if s.id != 3 { + t.Fatalf("Stream id: %d, want: 3", s.id) + } + + // Verifying that ct.state is draining when next stream ID > MaxStreamId. + if got, want := ct.stateForTesting(), draining; got != want { + t.Fatalf("Client transport state %v, want %v", got, want) + } +} + func (s) TestClientSendAndReceive(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() @@ -550,7 +619,7 @@ func (s) TestClientSendAndReceive(t *testing.T) { if recvErr != io.EOF { t.Fatalf("Error: %v; want ", recvErr) } - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) server.stop() } @@ -560,7 +629,7 @@ func (s) TestClientErrorNotify(t *testing.T) { go server.stop() // ct.reader should detect the error and activate ct.Error(). 
<-ct.Error() - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) } func performOneRPC(ct ClientTransport) { @@ -591,16 +660,13 @@ func performOneRPC(ct ClientTransport) { func (s) TestClientMix(t *testing.T) { s, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() - go func(s *server) { - time.Sleep(5 * time.Second) - s.stop() - }(s) + time.AfterFunc(time.Second, s.stop) go func(ct ClientTransport) { <-ct.Error() - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) }(ct) - for i := 0; i < 1000; i++ { - time.Sleep(10 * time.Millisecond) + for i := 0; i < 750; i++ { + time.Sleep(2 * time.Millisecond) go performOneRPC(ct) } } @@ -636,7 +702,7 @@ func (s) TestLargeMessage(t *testing.T) { }() } wg.Wait() - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) server.stop() } @@ -653,7 +719,7 @@ func (s) TestLargeMessageWithDelayRead(t *testing.T) { server, ct, cancel := setUpWithOptions(t, 0, sc, delayRead, co) defer cancel() defer server.stop() - defer ct.Close() + defer ct.Close(fmt.Errorf("closed manually by test")) server.mu.Lock() ready := server.ready server.mu.Unlock() @@ -737,6 +803,9 @@ func (s) TestLargeMessageWithDelayRead(t *testing.T) { } } +// TestGracefulClose ensures that GracefulClose allows in-flight streams to +// proceed until they complete naturally, while not allowing creation of new +// streams during this window. func (s) TestGracefulClose(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, pingpong) defer cancel() @@ -752,6 +821,9 @@ func (s) TestGracefulClose(t *testing.T) { }() ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10)) defer cancel() + + // Create a stream that will exist for this whole test and confirm basic + // functionality. 
s, err := ct.NewStream(ctx, &CallHdr{}) if err != nil { t.Fatalf("NewStream(_, _) = _, %v, want _, ", err) @@ -772,31 +844,31 @@ func (s) TestGracefulClose(t *testing.T) { if _, err := s.Read(recvMsg); err != nil { t.Fatalf("Error while reading: %v", err) } + + // Gracefully close the transport, which should not affect the existing + // stream. ct.GracefulClose() + var wg sync.WaitGroup - // Expect the failure for all the follow-up streams because ct has been closed gracefully. + // Expect errors creating new streams because the client transport has been + // gracefully closed. for i := 0; i < 200; i++ { wg.Add(1) go func() { defer wg.Done() - str, err := ct.NewStream(ctx, &CallHdr{}) - if err == ErrConnClosing { - return - } else if err != nil { - t.Errorf("_.NewStream(_, _) = _, %v, want _, %v", err, ErrConnClosing) + _, err := ct.NewStream(ctx, &CallHdr{}) + if err != nil && err.(*NewStreamError).Err == ErrConnClosing && err.(*NewStreamError).AllowTransparentRetry { return } - ct.Write(str, nil, nil, &Options{Last: true}) - if _, err := str.Read(make([]byte, 8)); err != errStreamDrain && err != ErrConnClosing { - t.Errorf("_.Read(_) = _, %v, want _, %v or %v", err, errStreamDrain, ErrConnClosing) - } + t.Errorf("_.NewStream(_, _) = _, %v, want _, %v", err, ErrConnClosing) }() } + + // Confirm the existing stream still functions as expected. ct.Write(s, nil, nil, &Options{Last: true}) if _, err := s.Read(incomingHeader); err != io.EOF { t.Fatalf("Client expected EOF from the server. Got: %v", err) } - // The stream which was created before graceful close can still proceed. 
wg.Wait() } @@ -831,7 +903,7 @@ func (s) TestLargeMessageSuspension(t *testing.T) { if _, err := s.Read(make([]byte, 8)); err.Error() != expectedErr.Error() { t.Fatalf("Read got %v of type %T, want %v", err, err, expectedErr) } - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) server.stop() } @@ -841,7 +913,7 @@ func (s) TestMaxStreams(t *testing.T) { } server, ct, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer cancel() - defer ct.Close() + defer ct.Close(fmt.Errorf("closed manually by test")) defer server.stop() callHdr := &CallHdr{ Host: "localhost", @@ -901,7 +973,7 @@ func (s) TestMaxStreams(t *testing.T) { // Close the first stream created so that the new stream can finally be created. ct.CloseStream(s, nil) <-done - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) <-ct.writerDone if ct.maxConcurrentStreams != 1 { t.Fatalf("ct.maxConcurrentStreams: %d, want 1", ct.maxConcurrentStreams) @@ -960,7 +1032,7 @@ func (s) TestServerContextCanceledOnClosedConnection(t *testing.T) { sc.mu.Unlock() break } - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) select { case <-ss.Context().Done(): if ss.Context().Err() != context.Canceled { @@ -980,7 +1052,7 @@ func (s) TestClientConnDecoupledFromApplicationRead(t *testing.T) { server, client, cancel := setUpWithOptions(t, 0, &ServerConfig{}, notifyCall, connectOptions) defer cancel() defer server.stop() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) waitWhileTrue(t, func() (bool, error) { server.mu.Lock() @@ -1069,7 +1141,7 @@ func (s) TestServerConnDecoupledFromApplicationRead(t *testing.T) { server, client, cancel := setUpWithOptions(t, 0, serverConfig, suspended, ConnectOptions{}) defer cancel() defer server.stop() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) waitWhileTrue(t, func() (bool, error) { server.mu.Lock() defer server.mu.Unlock() @@ -1232,6 +1304,58 @@ func (s) 
TestServerWithMisbehavedClient(t *testing.T) { } } +func (s) TestClientHonorsConnectContext(t *testing.T) { + // Create a server that will not send a preface. + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Error while listening: %v", err) + } + defer lis.Close() + go func() { // Launch the misbehaving server. + sconn, err := lis.Accept() + if err != nil { + t.Errorf("Error while accepting: %v", err) + return + } + defer sconn.Close() + if _, err := io.ReadFull(sconn, make([]byte, len(clientPreface))); err != nil { + t.Errorf("Error while reading client preface: %v", err) + return + } + sfr := http2.NewFramer(sconn, sconn) + // Do not write a settings frame, but read from the conn forever. + for { + if _, err := sfr.ReadFrame(); err != nil { + return + } + } + }() + + // Test context cancelation. + timeBefore := time.Now() + connectCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + time.AfterFunc(100*time.Millisecond, cancel) + + copts := ConnectOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil)} + _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) + if err == nil { + t.Fatalf("NewClientTransport() returned successfully; wanted error") + } + t.Logf("NewClientTransport() = _, %v", err) + if time.Since(timeBefore) > 3*time.Second { + t.Fatalf("NewClientTransport returned > 2.9s after context cancelation") + } + + // Test context deadline. 
+ connectCtx, cancel = context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + _, err = NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) + if err == nil { + t.Fatalf("NewClientTransport() returned successfully; wanted error") + } + t.Logf("NewClientTransport() = _, %v", err) +} + func (s) TestClientWithMisbehavedServer(t *testing.T) { // Create a misbehaving server. lis, err := net.Listen("tcp", "localhost:0") @@ -1250,10 +1374,14 @@ func (s) TestClientWithMisbehavedServer(t *testing.T) { } defer sconn.Close() if _, err := io.ReadFull(sconn, make([]byte, len(clientPreface))); err != nil { - t.Errorf("Error while reading clieng preface: %v", err) + t.Errorf("Error while reading client preface: %v", err) return } sfr := http2.NewFramer(sconn, sconn) + if err := sfr.WriteSettings(); err != nil { + t.Errorf("Error while writing settings: %v", err) + return + } if err := sfr.WriteSettingsAck(); err != nil { t.Errorf("Error while writing settings: %v", err) return @@ -1298,11 +1426,14 @@ func (s) TestClientWithMisbehavedServer(t *testing.T) { }() connectCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) defer cancel() - ct, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, ConnectOptions{}, func() {}, func(GoAwayReason) {}, func() {}) + + copts := ConnectOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil)} + ct, err := NewClientTransport(connectCtx, context.Background(), resolver.Address{Addr: lis.Addr().String()}, copts, func(GoAwayReason) {}) if err != nil { t.Fatalf("Error while creating client transport: %v", err) } - defer ct.Close() + defer ct.Close(fmt.Errorf("closed manually by test")) + str, err := ct.NewStream(connectCtx, &CallHdr{}) if err != nil { t.Fatalf("Error while creating stream: %v", err) @@ -1345,7 
+1476,7 @@ func (s) TestEncodingRequiredStatus(t *testing.T) { if !testutils.StatusErrEqual(s.Status().Err(), encodingTestStatus.Err()) { t.Fatalf("stream with status %v, want %v", s.Status(), encodingTestStatus) } - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) server.stop() } @@ -1367,7 +1498,7 @@ func (s) TestInvalidHeaderField(t *testing.T) { if se, ok := status.FromError(err); !ok || se.Code() != codes.Internal || !strings.Contains(err.Error(), expectedInvalidHeaderField) { t.Fatalf("Read got error %v, want error with code %s and contains %q", err, codes.Internal, expectedInvalidHeaderField) } - ct.Close() + ct.Close(fmt.Errorf("closed manually by test")) server.stop() } @@ -1375,7 +1506,7 @@ func (s) TestHeaderChanClosedAfterReceivingAnInvalidHeader(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, invalidHeaderField) defer cancel() defer server.stop() - defer ct.Close() + defer ct.Close(fmt.Errorf("closed manually by test")) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() s, err := ct.NewStream(ctx, &CallHdr{Host: "localhost", Method: "foo"}) @@ -1451,9 +1582,10 @@ func (s) TestAccountCheckWindowSizeWithLargeWindow(t *testing.T) { } func (s) TestAccountCheckWindowSizeWithSmallWindow(t *testing.T) { + // These settings disable dynamic window sizes based on BDP estimation; + // must be at least defaultWindowSize or the setting is ignored. wc := windowSizeConfig{ serverStream: defaultWindowSize, - // Note this is smaller than initialConnWindowSize which is the current default. 
serverConn: defaultWindowSize, clientStream: defaultWindowSize, clientConn: defaultWindowSize, @@ -1481,7 +1613,7 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) server, client, cancel := setUpWithOptions(t, 0, sc, pingpong, co) defer cancel() defer server.stop() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) waitWhileTrue(t, func() (bool, error) { server.mu.Lock() defer server.mu.Unlock() @@ -1495,10 +1627,11 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) for k := range server.conns { st = k.(*http2Server) } + server.mu.Unlock() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - server.mu.Unlock() - const numStreams = 10 + const numStreams = 5 clientStreams := make([]*Stream, numStreams) for i := 0; i < numStreams; i++ { var err error @@ -1518,26 +1651,27 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) binary.BigEndian.PutUint32(buf[1:], uint32(msgSize)) opts := Options{} header := make([]byte, 5) - for i := 1; i <= 10; i++ { + for i := 1; i <= 5; i++ { if err := client.Write(stream, nil, buf, &opts); err != nil { - t.Errorf("Error on client while writing message: %v", err) + t.Errorf("Error on client while writing message %v on stream %v: %v", i, stream.id, err) return } if _, err := stream.Read(header); err != nil { - t.Errorf("Error on client while reading data frame header: %v", err) + t.Errorf("Error on client while reading data frame header %v on stream %v: %v", i, stream.id, err) return } sz := binary.BigEndian.Uint32(header[1:]) recvMsg := make([]byte, int(sz)) if _, err := stream.Read(recvMsg); err != nil { - t.Errorf("Error on client while reading data: %v", err) + t.Errorf("Error on client while reading data %v on stream %v: %v", i, stream.id, err) return } if len(recvMsg) != msgSize { - t.Errorf("Length of message received by client: %v, want: %v", len(recvMsg), 
msgSize) + t.Errorf("Length of message %v received by client on stream %v: %v, want: %v", i, stream.id, len(recvMsg), msgSize) return } } + t.Logf("stream %v done with pingpongs", stream.id) }(stream) } wg.Wait() @@ -1546,6 +1680,7 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) loopyServerStreams := map[uint32]*outStream{} // Get all the streams from server reader and writer and client writer. st.mu.Lock() + client.mu.Lock() for _, stream := range clientStreams { id := stream.id serverStreams[id] = st.activeStreams[id] @@ -1553,6 +1688,7 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) loopyClientStreams[id] = client.loopy.estdStreams[id] } + client.mu.Unlock() st.mu.Unlock() // Close all streams for _, stream := range clientStreams { @@ -1563,8 +1699,8 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) } // Close down both server and client so that their internals can be read without data // races. - client.Close() - st.Close() + client.Close(errors.New("closed manually by test")) + st.Close(errors.New("closed manually by test")) <-st.readerDone <-st.writerDone <-client.readerDone @@ -1574,6 +1710,9 @@ func testFlowControlAccountCheck(t *testing.T, msgSize int, wc windowSizeConfig) sstream := serverStreams[id] loopyServerStream := loopyServerStreams[id] loopyClientStream := loopyClientStreams[id] + if loopyServerStream == nil { + t.Fatalf("Unexpected nil loopyServerStream") + } // Check stream flow control. 
if int(cstream.fc.limit+cstream.fc.delta-cstream.fc.pendingData-cstream.fc.pendingUpdate) != int(st.loopy.oiws)-loopyServerStream.bytesOutStanding { t.Fatalf("Account mismatch: client stream inflow limit(%d) + delta(%d) - pendingData(%d) - pendingUpdate(%d) != server outgoing InitialWindowSize(%d) - outgoingStream.bytesOutStanding(%d)", cstream.fc.limit, cstream.fc.delta, cstream.fc.pendingData, cstream.fc.pendingUpdate, st.loopy.oiws, loopyServerStream.bytesOutStanding) @@ -1663,81 +1802,354 @@ func (s) TestReadGivesSameErrorAfterAnyErrorOccurs(t *testing.T) { } } -// If the client sends an HTTP/2 request with a :method header with a value other than POST, as specified in -// the gRPC over HTTP/2 specification, the server should close the stream. -func (s) TestServerWithClientSendingWrongMethod(t *testing.T) { - server := setUpServerOnly(t, 0, &ServerConfig{}, suspended) - defer server.stop() - // Create a client directly to not couple what you can send to API of http2_client.go. - mconn, err := net.Dial("tcp", server.lis.Addr().String()) - if err != nil { - t.Fatalf("Client failed to dial: %v", err) +// TestHeadersCausingStreamError tests headers that should cause a stream protocol +// error, which would end up with a RST_STREAM being sent to the client and also +// the server closing the stream. +func (s) TestHeadersCausingStreamError(t *testing.T) { + tests := []struct { + name string + headers []struct { + name string + values []string + } + }{ + // "Transports must consider requests containing the Connection header + // as malformed" - A41 Malformed requests map to a stream error of type + // PROTOCOL_ERROR. 
+ { + name: "Connection header present", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + {name: "connection", values: []string{"not-supported"}}, + }, + }, + // multiple :authority or multiple Host headers would make the eventual + // :authority ambiguous as per A41. Since these headers won't have a + // content-type that corresponds to a grpc-client, the server should + // simply write a RST_STREAM to the wire. + { + // Note: multiple authority headers are handled by the framer + // itself, which will cause a stream error. Thus, it will never get + // to operateHeaders with the check in operateHeaders for stream + // error, but the server transport will still send a stream error. + name: "Multiple authority headers", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost", "localhost2"}}, + {name: "host", values: []string{"localhost"}}, + }, + }, } - defer mconn.Close() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + server := setUpServerOnly(t, 0, &ServerConfig{}, suspended) + defer server.stop() + // Create a client directly to not tie what you can send to API of + // http2_client.go (i.e. control headers being sent). 
+ mconn, err := net.Dial("tcp", server.lis.Addr().String()) + if err != nil { + t.Fatalf("Client failed to dial: %v", err) + } + defer mconn.Close() - if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) { - t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, ", n, err, len(clientPreface)) - } + if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) { + t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, ", n, err, len(clientPreface)) + } - framer := http2.NewFramer(mconn, mconn) - if err := framer.WriteSettings(); err != nil { - t.Fatalf("Error while writing settings: %v", err) - } + framer := http2.NewFramer(mconn, mconn) + if err := framer.WriteSettings(); err != nil { + t.Fatalf("Error while writing settings: %v", err) + } - // success chan indicates that reader received a RSTStream from server. - // An error will be passed on it if any other frame is received. - success := testutils.NewChannel() + // result chan indicates that reader received a RSTStream from server. + // An error will be passed on it if any other frame is received. + result := testutils.NewChannel() - // Launch a reader goroutine. - go func() { - for { - frame, err := framer.ReadFrame() - if err != nil { - return - } - switch frame := frame.(type) { - case *http2.SettingsFrame: - // Do nothing. A settings frame is expected from server preface. - case *http2.RSTStreamFrame: - if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeProtocol { - // Client only created a single stream, so RST Stream should be for that single stream. - t.Errorf("RST stream received with streamID: %d and code %v, want streamID: 1 and code: http.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode)) + // Launch a reader goroutine. + go func() { + for { + frame, err := framer.ReadFrame() + if err != nil { + return + } + switch frame := frame.(type) { + case *http2.SettingsFrame: + // Do nothing. 
A settings frame is expected from server preface. + case *http2.RSTStreamFrame: + if frame.Header().StreamID != 1 || http2.ErrCode(frame.ErrCode) != http2.ErrCodeProtocol { + // Client only created a single stream, so RST Stream should be for that single stream. + result.Send(fmt.Errorf("RST stream received with streamID: %d and code %v, want streamID: 1 and code: http.ErrCodeFlowControl", frame.Header().StreamID, http2.ErrCode(frame.ErrCode))) + } + // Records that client successfully received RST Stream frame. + result.Send(nil) + return + default: + // The server should send nothing but a single RST Stream frame. + result.Send(errors.New("the client received a frame other than RST Stream")) + } + } + }() + + var buf bytes.Buffer + henc := hpack.NewEncoder(&buf) + + // Needs to build headers deterministically to conform to gRPC over + // HTTP/2 spec. + for _, header := range test.headers { + for _, value := range header.values { + if err := henc.WriteField(hpack.HeaderField{Name: header.name, Value: value}); err != nil { + t.Fatalf("Error while encoding header: %v", err) + } } - // Records that client successfully received RST Stream frame. - success.Send(nil) - return - default: - // The server should send nothing but a single RST Stream frame. - success.Send(errors.New("The client received a frame other than RST Stream")) } - } - }() - // Done with HTTP/2 setup - now create a stream with a bad method header. - var buf bytes.Buffer - henc := hpack.NewEncoder(&buf) - // Method is required to be POST in a gRPC call. - if err := henc.WriteField(hpack.HeaderField{Name: ":method", Value: "PUT"}); err != nil { - t.Fatalf("Error while encoding header: %v", err) - } - // Have the rest of the headers be ok and within the gRPC over HTTP/2 spec. 
- if err := henc.WriteField(hpack.HeaderField{Name: ":path", Value: "foo"}); err != nil { - t.Fatalf("Error while encoding header: %v", err) - } - if err := henc.WriteField(hpack.HeaderField{Name: ":authority", Value: "localhost"}); err != nil { - t.Fatalf("Error while encoding header: %v", err) - } - if err := henc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}); err != nil { - t.Fatalf("Error while encoding header: %v", err) + if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil { + t.Fatalf("Error while writing headers: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + r, err := result.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving from channel: %v", err) + } + if r != nil { + t.Fatalf("want nil, got %v", r) + } + }) } +} - if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil { - t.Fatalf("Error while writing headers: %v", err) +// TestHeadersHTTPStatusGRPCStatus tests requests with certain headers get a +// certain HTTP and gRPC status back. +func (s) TestHeadersHTTPStatusGRPCStatus(t *testing.T) { + tests := []struct { + name string + headers []struct { + name string + values []string + } + httpStatusWant string + grpcStatusWant string + grpcMessageWant string + }{ + // Note: multiple authority headers are handled by the framer itself, + // which will cause a stream error. Thus, it will never get to + // operateHeaders with the check in operateHeaders for possible + // grpc-status sent back. + + // multiple :authority or multiple Host headers would make the eventual + // :authority ambiguous as per A41. This takes precedence even over the + // fact a request is non grpc. All of these requests should be rejected + // with grpc-status Internal. 
Thus, requests with multiple hosts should + // get rejected with HTTP Status 400 and gRPC status Internal, + // regardless of whether the client is speaking gRPC or not. + { + name: "Multiple host headers non grpc", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "host", values: []string{"localhost", "localhost2"}}, + }, + httpStatusWant: "400", + grpcStatusWant: "13", + grpcMessageWant: "both must only have 1 value as per HTTP/2 spec", + }, + { + name: "Multiple host headers grpc", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + {name: "host", values: []string{"localhost", "localhost2"}}, + }, + httpStatusWant: "400", + grpcStatusWant: "13", + grpcMessageWant: "both must only have 1 value as per HTTP/2 spec", + }, + // If the client sends an HTTP/2 request with a :method header with a + // value other than POST, as specified in the gRPC over HTTP/2 + // specification, the server should fail the RPC. 
+ { + name: "Client Sending Wrong Method", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"PUT"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + }, + httpStatusWant: "405", + grpcStatusWant: "13", + grpcMessageWant: "which should be POST", + }, + { + name: "Client Sending Wrong Content-Type", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/json"}}, + }, + httpStatusWant: "415", + grpcStatusWant: "3", + grpcMessageWant: `invalid gRPC request content-type "application/json"`, + }, + { + name: "Client Sending Bad Timeout", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + {name: "grpc-timeout", values: []string{"18f6n"}}, + }, + httpStatusWant: "400", + grpcStatusWant: "13", + grpcMessageWant: "malformed grpc-timeout", + }, + { + name: "Client Sending Bad Binary Header", + headers: []struct { + name string + values []string + }{ + {name: ":method", values: []string{"POST"}}, + {name: ":path", values: []string{"foo"}}, + {name: ":authority", values: []string{"localhost"}}, + {name: "content-type", values: []string{"application/grpc"}}, + {name: "foobar-bin", values: []string{"X()3e@#$-"}}, + }, + httpStatusWant: "400", + grpcStatusWant: "13", + grpcMessageWant: `header "foobar-bin": illegal base64 data`, + }, } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if e, err := success.Receive(ctx); e != nil || err != nil { - 
t.Fatalf("Error in frame server should send: %v. Error receiving from channel: %v", e, err) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + server := setUpServerOnly(t, 0, &ServerConfig{}, suspended) + defer server.stop() + // Create a client directly to not tie what you can send to API of + // http2_client.go (i.e. control headers being sent). + mconn, err := net.Dial("tcp", server.lis.Addr().String()) + if err != nil { + t.Fatalf("Client failed to dial: %v", err) + } + defer mconn.Close() + + if n, err := mconn.Write(clientPreface); err != nil || n != len(clientPreface) { + t.Fatalf("mconn.Write(clientPreface) = %d, %v, want %d, ", n, err, len(clientPreface)) + } + + framer := http2.NewFramer(mconn, mconn) + framer.ReadMetaHeaders = hpack.NewDecoder(4096, nil) + if err := framer.WriteSettings(); err != nil { + t.Fatalf("Error while writing settings: %v", err) + } + + // result chan indicates that reader received a Headers Frame with + // desired grpc status and message from server. An error will be passed + // on it if any other frame is received. + result := testutils.NewChannel() + + // Launch a reader goroutine. + go func() { + for { + frame, err := framer.ReadFrame() + if err != nil { + return + } + switch frame := frame.(type) { + case *http2.SettingsFrame: + // Do nothing. A settings frame is expected from server preface. 
+ case *http2.MetaHeadersFrame: + var httpStatus, grpcStatus, grpcMessage string + for _, header := range frame.Fields { + if header.Name == ":status" { + httpStatus = header.Value + } + if header.Name == "grpc-status" { + grpcStatus = header.Value + } + if header.Name == "grpc-message" { + grpcMessage = header.Value + } + } + if httpStatus != test.httpStatusWant { + result.Send(fmt.Errorf("incorrect HTTP Status got %v, want %v", httpStatus, test.httpStatusWant)) + return + } + if grpcStatus != test.grpcStatusWant { // grpc status code internal + result.Send(fmt.Errorf("incorrect gRPC Status got %v, want %v", grpcStatus, test.grpcStatusWant)) + return + } + if !strings.Contains(grpcMessage, test.grpcMessageWant) { + result.Send(fmt.Errorf("incorrect gRPC message, want %q got %q", test.grpcMessageWant, grpcMessage)) + return + } + + // Records that client successfully received a HeadersFrame + // with expected Trailers-Only response. + result.Send(nil) + return + default: + // The server should send nothing but a single Settings and Headers frame. + result.Send(errors.New("the client received a frame other than Settings or Headers")) + } + } + }() + + var buf bytes.Buffer + henc := hpack.NewEncoder(&buf) + + // Needs to build headers deterministically to conform to gRPC over + // HTTP/2 spec. 
+ for _, header := range test.headers { + for _, value := range header.values { + if err := henc.WriteField(hpack.HeaderField{Name: header.name, Value: value}); err != nil { + t.Fatalf("Error while encoding header: %v", err) + } + } + } + + if err := framer.WriteHeaders(http2.HeadersFrameParam{StreamID: 1, BlockFragment: buf.Bytes(), EndHeaders: true}); err != nil { + t.Fatalf("Error while writing headers: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + r, err := result.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving from channel: %v", err) + } + if r != nil { + t.Fatalf("want nil, got %v", r) + } + }) } } @@ -1757,12 +2169,12 @@ func (s) TestPingPong1MB(t *testing.T) { runPingPongTest(t, 1048576) } -//This is a stress-test of flow control logic. +// This is a stress-test of flow control logic. func runPingPongTest(t *testing.T, msgSize int) { server, client, cancel := setUp(t, 0, 0, pingpong) defer cancel() defer server.stop() - defer client.Close() + defer client.Close(fmt.Errorf("closed manually by test")) waitWhileTrue(t, func() (bool, error) { server.mu.Lock() defer server.mu.Unlock() @@ -1783,34 +2195,27 @@ func runPingPongTest(t *testing.T, msgSize int) { binary.BigEndian.PutUint32(outgoingHeader[1:], uint32(msgSize)) opts := &Options{} incomingHeader := make([]byte, 5) - done := make(chan struct{}) - go func() { - timer := time.NewTimer(time.Second * 5) - <-timer.C - close(done) - }() - for { - select { - case <-done: - client.Write(stream, nil, nil, &Options{Last: true}) - if _, err := stream.Read(incomingHeader); err != io.EOF { - t.Fatalf("Client expected EOF from the server. Got: %v", err) - } - return - default: - if err := client.Write(stream, outgoingHeader, msg, opts); err != nil { - t.Fatalf("Error on client while writing message. Err: %v", err) - } - if _, err := stream.Read(incomingHeader); err != nil { - t.Fatalf("Error on client while reading data header. 
Err: %v", err) - } - sz := binary.BigEndian.Uint32(incomingHeader[1:]) - recvMsg := make([]byte, int(sz)) - if _, err := stream.Read(recvMsg); err != nil { - t.Fatalf("Error on client while reading data. Err: %v", err) - } + + ctx, cancel = context.WithTimeout(ctx, time.Second) + defer cancel() + for ctx.Err() == nil { + if err := client.Write(stream, outgoingHeader, msg, opts); err != nil { + t.Fatalf("Error on client while writing message. Err: %v", err) + } + if _, err := stream.Read(incomingHeader); err != nil { + t.Fatalf("Error on client while reading data header. Err: %v", err) + } + sz := binary.BigEndian.Uint32(incomingHeader[1:]) + recvMsg := make([]byte, int(sz)) + if _, err := stream.Read(recvMsg); err != nil { + t.Fatalf("Error on client while reading data. Err: %v", err) } } + + client.Write(stream, nil, nil, &Options{Last: true}) + if _, err := stream.Read(incomingHeader); err != io.EOF { + t.Fatalf("Client expected EOF from the server. Got: %v", err) + } } type tableSizeLimit struct { @@ -1850,7 +2255,7 @@ func (s) TestHeaderTblSize(t *testing.T) { server, ct, cancel := setUp(t, 0, math.MaxUint32, normal) defer cancel() - defer ct.Close() + defer ct.Close(fmt.Errorf("closed manually by test")) defer server.stop() ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() @@ -1965,14 +2370,291 @@ func (s) TestClientHandshakeInfo(t *testing.T) { defer cancel() creds := &attrTransportCreds{} - tr, err := NewClientTransport(ctx, context.Background(), addr, ConnectOptions{TransportCredentials: creds}, func() {}, func(GoAwayReason) {}, func() {}) + copts := ConnectOptions{ + TransportCredentials: creds, + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), + } + tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {}) if err != nil { t.Fatalf("NewClientTransport(): %v", err) } - defer tr.Close() + defer tr.Close(fmt.Errorf("closed 
manually by test")) wantAttr := attributes.New(testAttrKey, testAttrVal) if gotAttr := creds.attr; !cmp.Equal(gotAttr, wantAttr, cmp.AllowUnexported(attributes.Attributes{})) { t.Fatalf("received attributes %v in creds, want %v", gotAttr, wantAttr) } } + +// TestClientHandshakeInfoDialer adds attributes to the resolver.Address passes to +// NewClientTransport and verifies that these attributes are received by a custom +// dialer. +func (s) TestClientHandshakeInfoDialer(t *testing.T) { + server := setUpServerOnly(t, 0, &ServerConfig{}, pingpong) + defer server.stop() + + const ( + testAttrKey = "foo" + testAttrVal = "bar" + ) + addr := resolver.Address{ + Addr: "localhost:" + server.port, + Attributes: attributes.New(testAttrKey, testAttrVal), + } + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second)) + defer cancel() + + var attr *attributes.Attributes + dialer := func(ctx context.Context, addr string) (net.Conn, error) { + ai := credentials.ClientHandshakeInfoFromContext(ctx) + attr = ai.Attributes + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) + } + + copts := ConnectOptions{ + Dialer: dialer, + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefSubChannel, time.Now().Unix(), nil), + } + tr, err := NewClientTransport(ctx, context.Background(), addr, copts, func(GoAwayReason) {}) + if err != nil { + t.Fatalf("NewClientTransport(): %v", err) + } + defer tr.Close(fmt.Errorf("closed manually by test")) + + wantAttr := attributes.New(testAttrKey, testAttrVal) + if gotAttr := attr; !cmp.Equal(gotAttr, wantAttr, cmp.AllowUnexported(attributes.Attributes{})) { + t.Errorf("Received attributes %v in custom dialer, want %v", gotAttr, wantAttr) + } +} + +func (s) TestClientDecodeHeaderStatusErr(t *testing.T) { + testStream := func() *Stream { + return &Stream{ + done: make(chan struct{}), + headerChan: make(chan struct{}), + buf: &recvBuffer{ + c: make(chan recvMsg), + mu: sync.Mutex{}, + }, + } + } + + testClient := 
func(ts *Stream) *http2Client { + return &http2Client{ + mu: sync.Mutex{}, + activeStreams: map[uint32]*Stream{ + 0: ts, + }, + controlBuf: &controlBuffer{ + ch: make(chan struct{}), + done: make(chan struct{}), + list: &itemList{}, + }, + } + } + + for _, test := range []struct { + name string + // input + metaHeaderFrame *http2.MetaHeadersFrame + // output + wantStatus *status.Status + }{ + { + name: "valid header", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "content-type", Value: "application/grpc"}, + {Name: "grpc-status", Value: "0"}, + {Name: ":status", Value: "200"}, + }, + }, + // no error + wantStatus: status.New(codes.OK, ""), + }, + { + name: "missing content-type header", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "grpc-status", Value: "0"}, + {Name: ":status", Value: "200"}, + }, + }, + wantStatus: status.New( + codes.Unknown, + "malformed header: missing HTTP content-type", + ), + }, + { + name: "invalid grpc status header field", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "content-type", Value: "application/grpc"}, + {Name: "grpc-status", Value: "xxxx"}, + {Name: ":status", Value: "200"}, + }, + }, + wantStatus: status.New( + codes.Internal, + "transport: malformed grpc-status: strconv.ParseInt: parsing \"xxxx\": invalid syntax", + ), + }, + { + name: "invalid http content type", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "content-type", Value: "application/json"}, + }, + }, + wantStatus: status.New( + codes.Internal, + "malformed header: missing HTTP status; transport: received unexpected content-type \"application/json\"", + ), + }, + { + name: "http fallback and invalid http status", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + // No content type provided then fallback into handling http error. 
+ {Name: ":status", Value: "xxxx"}, + }, + }, + wantStatus: status.New( + codes.Internal, + "transport: malformed http-status: strconv.ParseInt: parsing \"xxxx\": invalid syntax", + ), + }, + { + name: "http2 frame size exceeds", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: nil, + Truncated: true, + }, + wantStatus: status.New( + codes.Internal, + "peer header list size exceeded limit", + ), + }, + { + name: "bad status in grpc mode", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "content-type", Value: "application/grpc"}, + {Name: "grpc-status", Value: "0"}, + {Name: ":status", Value: "504"}, + }, + }, + wantStatus: status.New( + codes.Unavailable, + "unexpected HTTP status code received from server: 504 (Gateway Timeout)", + ), + }, + { + name: "missing http status", + metaHeaderFrame: &http2.MetaHeadersFrame{ + Fields: []hpack.HeaderField{ + {Name: "content-type", Value: "application/grpc"}, + }, + }, + wantStatus: status.New( + codes.Internal, + "malformed header: missing HTTP status", + ), + }, + } { + + t.Run(test.name, func(t *testing.T) { + ts := testStream() + s := testClient(ts) + + test.metaHeaderFrame.HeadersFrame = &http2.HeadersFrame{ + FrameHeader: http2.FrameHeader{ + StreamID: 0, + }, + } + + s.operateHeaders(test.metaHeaderFrame) + + got := ts.status + want := test.wantStatus + if got.Code() != want.Code() || got.Message() != want.Message() { + t.Fatalf("operateHeaders(%v); status = \ngot: %s\nwant: %s", test.metaHeaderFrame, got, want) + } + }) + t.Run(fmt.Sprintf("%s-end_stream", test.name), func(t *testing.T) { + ts := testStream() + s := testClient(ts) + + test.metaHeaderFrame.HeadersFrame = &http2.HeadersFrame{ + FrameHeader: http2.FrameHeader{ + StreamID: 0, + Flags: http2.FlagHeadersEndStream, + }, + } + + s.operateHeaders(test.metaHeaderFrame) + + got := ts.status + want := test.wantStatus + if got.Code() != want.Code() || got.Message() != want.Message() { + t.Fatalf("operateHeaders(%v); 
status = \ngot: %s\nwant: %s", test.metaHeaderFrame, got, want) + } + }) + } +} + +func TestConnectionError_Unwrap(t *testing.T) { + err := connectionErrorf(false, os.ErrNotExist, "unwrap me") + if !errors.Is(err, os.ErrNotExist) { + t.Error("ConnectionError does not unwrap") + } +} + +func (s) TestPeerSetInServerContext(t *testing.T) { + // create client and server transports. + server, client, cancel := setUp(t, 0, math.MaxUint32, normal) + defer cancel() + defer server.stop() + defer client.Close(fmt.Errorf("closed manually by test")) + + // create a stream with client transport. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := client.NewStream(ctx, &CallHdr{}) + if err != nil { + t.Fatalf("failed to create a stream: %v", err) + } + + waitWhileTrue(t, func() (bool, error) { + server.mu.Lock() + defer server.mu.Unlock() + + if len(server.conns) == 0 { + return true, fmt.Errorf("timed-out while waiting for connection to be created on the server") + } + return false, nil + }) + + // verify peer is set in client transport context. + if _, ok := peer.FromContext(client.ctx); !ok { + t.Fatalf("Peer expected in client transport's context, but actually not found.") + } + + // verify peer is set in stream context. + if _, ok := peer.FromContext(stream.ctx); !ok { + t.Fatalf("Peer expected in stream context, but actually not found.") + } + + // verify peer is set in server transport context. 
+ server.mu.Lock() + for k := range server.conns { + sc, ok := k.(*http2Server) + if !ok { + t.Fatalf("ServerTransport is of type %T, want %T", k, &http2Server{}) + } + if _, ok = peer.FromContext(sc.ctx); !ok { + t.Fatalf("Peer expected in server transport's context, but actually not found.") + } + } + server.mu.Unlock() +} diff --git a/internal/wrr/random.go b/internal/wrr/random.go index ccf5113e9f32..6d5eb7d46209 100644 --- a/internal/wrr/random.go +++ b/internal/wrr/random.go @@ -19,6 +19,7 @@ package wrr import ( "fmt" + "sort" "sync" "google.golang.org/grpc/internal/grpcrand" @@ -26,8 +27,9 @@ import ( // weightedItem is a wrapped weighted item that is used to implement weighted random algorithm. type weightedItem struct { - Item interface{} - Weight int64 + item interface{} + weight int64 + accumulatedWeight int64 } func (w *weightedItem) String() string { @@ -36,9 +38,10 @@ func (w *weightedItem) String() string { // randomWRR is a struct that contains weighted items implement weighted random algorithm. type randomWRR struct { - mu sync.RWMutex - items []*weightedItem - sumOfWeights int64 + mu sync.RWMutex + items []*weightedItem + // Are all item's weights equal + equalWeights bool } // NewRandom creates a new WRR with random. @@ -51,27 +54,36 @@ var grpcrandInt63n = grpcrand.Int63n func (rw *randomWRR) Next() (item interface{}) { rw.mu.RLock() defer rw.mu.RUnlock() - if rw.sumOfWeights == 0 { + if len(rw.items) == 0 { return nil } - // Random number in [0, sum). - randomWeight := grpcrandInt63n(rw.sumOfWeights) - for _, item := range rw.items { - randomWeight = randomWeight - item.Weight - if randomWeight < 0 { - return item.Item - } + if rw.equalWeights { + return rw.items[grpcrandInt63n(int64(len(rw.items)))].item } - return rw.items[len(rw.items)-1].Item + sumOfWeights := rw.items[len(rw.items)-1].accumulatedWeight + // Random number in [0, sumOfWeights). 
+ randomWeight := grpcrandInt63n(sumOfWeights) + // Item's accumulated weights are in ascending order, because item's weight >= 0. + // Binary search rw.items to find first item whose accumulatedWeight > randomWeight + // The return i is guaranteed to be in range [0, len(rw.items)) because randomWeight < last item's accumulatedWeight + i := sort.Search(len(rw.items), func(i int) bool { return rw.items[i].accumulatedWeight > randomWeight }) + return rw.items[i].item } func (rw *randomWRR) Add(item interface{}, weight int64) { rw.mu.Lock() defer rw.mu.Unlock() - rItem := &weightedItem{Item: item, Weight: weight} + accumulatedWeight := weight + equalWeights := true + if len(rw.items) > 0 { + lastItem := rw.items[len(rw.items)-1] + accumulatedWeight = lastItem.accumulatedWeight + weight + equalWeights = rw.equalWeights && weight == lastItem.weight + } + rw.equalWeights = equalWeights + rItem := &weightedItem{item: item, weight: weight, accumulatedWeight: accumulatedWeight} rw.items = append(rw.items, rItem) - rw.sumOfWeights += weight } func (rw *randomWRR) String() string { diff --git a/internal/wrr/wrr_test.go b/internal/wrr/wrr_test.go index 4565e34ffb9c..ce4f5e507a2c 100644 --- a/internal/wrr/wrr_test.go +++ b/internal/wrr/wrr_test.go @@ -21,6 +21,7 @@ import ( "errors" "math" "math/rand" + "strconv" "testing" "github.com/google/go-cmp/cmp" @@ -70,12 +71,22 @@ func testWRRNext(t *testing.T, newWRR func() WRR) { name: "17-23-37", weights: []int64{17, 23, 37}, }, + { + name: "no items", + weights: []int64{}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var sumOfWeights int64 - w := newWRR() + if len(tt.weights) == 0 { + if next := w.Next(); next != nil { + t.Fatalf("w.Next returns non nil value:%v when there is no item", next) + } + return + } + + var sumOfWeights int64 for i, weight := range tt.weights { w.Add(i, weight) sumOfWeights += weight @@ -112,6 +123,70 @@ func (s) TestEdfWrrNext(t *testing.T) { testWRRNext(t, NewEDF) 
} +func BenchmarkRandomWRRNext(b *testing.B) { + for _, n := range []int{100, 500, 1000} { + b.Run("equal-weights-"+strconv.Itoa(n)+"-items", func(b *testing.B) { + w := NewRandom() + sumOfWeights := n + for i := 0; i < n; i++ { + w.Add(i, 1) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for i := 0; i < sumOfWeights; i++ { + w.Next() + } + } + }) + } + + var maxWeight int64 = 1024 + for _, n := range []int{100, 500, 1000} { + b.Run("random-weights-"+strconv.Itoa(n)+"-items", func(b *testing.B) { + w := NewRandom() + var sumOfWeights int64 + for i := 0; i < n; i++ { + weight := rand.Int63n(maxWeight + 1) + w.Add(i, weight) + sumOfWeights += weight + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for i := 0; i < int(sumOfWeights); i++ { + w.Next() + } + } + }) + } + + itemsNum := 200 + heavyWeight := int64(itemsNum) + lightWeight := int64(1) + heavyIndices := []int{0, itemsNum / 2, itemsNum - 1} + for _, heavyIndex := range heavyIndices { + b.Run("skew-weights-heavy-index-"+strconv.Itoa(heavyIndex), func(b *testing.B) { + w := NewRandom() + var sumOfWeights int64 + for i := 0; i < itemsNum; i++ { + var weight int64 + if i == heavyIndex { + weight = heavyWeight + } else { + weight = lightWeight + } + sumOfWeights += weight + w.Add(i, weight) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for i := 0; i < int(sumOfWeights); i++ { + w.Next() + } + } + }) + } +} + func init() { r := rand.New(rand.NewSource(0)) grpcrandInt63n = r.Int63n diff --git a/internal/xds/bootstrap.go b/internal/xds/bootstrap.go deleted file mode 100644 index 97ec8e17208e..000000000000 --- a/internal/xds/bootstrap.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xds - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/xds/env" -) - -var logger = grpclog.Component("internal/xds") - -// TransportAPI refers to the API version for xDS transport protocol. -type TransportAPI int - -const ( - // TransportV2 refers to the v2 xDS transport protocol. - TransportV2 TransportAPI = iota - // TransportV3 refers to the v3 xDS transport protocol. - TransportV3 -) - -// BootstrapOptions wraps the parameters passed to SetupBootstrapFile. -type BootstrapOptions struct { - // Version is the xDS transport protocol version. - Version TransportAPI - // NodeID is the node identifier of the gRPC client/server node in the - // proxyless service mesh. - NodeID string - // ServerURI is the address of the management server. - ServerURI string - // ServerListenerResourceNameTemplate is the Listener resource name to fetch. - ServerListenerResourceNameTemplate string - // CertificateProviders is the certificate providers configuration. - CertificateProviders map[string]json.RawMessage -} - -// SetupBootstrapFile creates a temporary file with bootstrap contents, based on -// the passed in options, and updates the bootstrap environment variable to -// point to this file. -// -// Returns a cleanup function which will be non-nil if the setup process was -// completed successfully. It is the responsibility of the caller to invoke the -// cleanup function at the end of the test. 
-func SetupBootstrapFile(opts BootstrapOptions) (func(), error) { - f, err := ioutil.TempFile("", "test_xds_bootstrap_*") - if err != nil { - return nil, fmt.Errorf("failed to created bootstrap file: %v", err) - } - - cfg := &bootstrapConfig{ - XdsServers: []server{ - { - ServerURI: opts.ServerURI, - ChannelCreds: []creds{ - { - Type: "insecure", - }, - }, - }, - }, - Node: node{ - ID: opts.NodeID, - }, - CertificateProviders: opts.CertificateProviders, - ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, - } - switch opts.Version { - case TransportV2: - // TODO: Add any v2 specific fields. - case TransportV3: - cfg.XdsServers[0].ServerFeatures = append(cfg.XdsServers[0].ServerFeatures, "xds_v3") - default: - return nil, fmt.Errorf("unsupported xDS transport protocol version: %v", opts.Version) - } - - bootstrapContents, err := json.MarshalIndent(cfg, "", " ") - if err != nil { - return nil, fmt.Errorf("failed to created bootstrap file: %v", err) - } - if err := ioutil.WriteFile(f.Name(), bootstrapContents, 0644); err != nil { - return nil, fmt.Errorf("failed to created bootstrap file: %v", err) - } - logger.Infof("Created bootstrap file at %q with contents: %s\n", f.Name(), bootstrapContents) - - origBootstrapFileName := env.BootstrapFileName - env.BootstrapFileName = f.Name() - return func() { - os.Remove(f.Name()) - env.BootstrapFileName = origBootstrapFileName - }, nil -} - -type bootstrapConfig struct { - XdsServers []server `json:"xds_servers,omitempty"` - Node node `json:"node,omitempty"` - CertificateProviders map[string]json.RawMessage `json:"certificate_providers,omitempty"` - ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` -} - -type server struct { - ServerURI string `json:"server_uri,omitempty"` - ChannelCreds []creds `json:"channel_creds,omitempty"` - ServerFeatures []string `json:"server_features,omitempty"` -} - -type creds struct { - Type string `json:"type,omitempty"` - 
Config interface{} `json:"config,omitempty"` -} - -type node struct { - ID string `json:"id,omitempty"` -} diff --git a/internal/xds/env/env.go b/internal/xds/env/env.go deleted file mode 100644 index 1110722a630b..000000000000 --- a/internal/xds/env/env.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package env acts a single source of definition for all environment variables -// related to the xDS implementation in gRPC. -package env - -import ( - "os" - "strings" -) - -const ( - // BootstrapFileNameEnv is the env variable to set bootstrap file name. - // Do not use this and read from env directly. Its value is read and kept in - // variable BootstrapFileName. - // - // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // BootstrapFileContentEnv is the env variable to set bootstrapp file - // content. Do not use this and read from env directly. Its value is read - // and kept in variable BootstrapFileName. - // - // When both bootstrap FileName and FileContent are set, FileName is used. 
- BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - circuitBreakingSupportEnv = "GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING" - timeoutSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT" - faultInjectionSupportEnv = "GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION" - clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" - - c2pResolverSupportEnv = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" - c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" -) - -var ( - // BootstrapFileName holds the name of the file which contains xDS bootstrap - // configuration. Users can specify the location of the bootstrap file by - // setting the environment variable "GRPC_XDS_BOOSTRAP". - // - // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileName = os.Getenv(BootstrapFileNameEnv) - // BootstrapFileContent holds the content of the xDS bootstrap - // configuration. Users can specify the bootstrap config by - // setting the environment variable "GRPC_XDS_BOOSTRAP_CONFIG". - // - // When both bootstrap FileName and FileContent are set, FileName is used. - BootstrapFileContent = os.Getenv(BootstrapFileContentEnv) - // CircuitBreakingSupport indicates whether circuit breaking support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING" to "false". - CircuitBreakingSupport = !strings.EqualFold(os.Getenv(circuitBreakingSupportEnv), "false") - // TimeoutSupport indicates whether support for max_stream_duration in - // route actions is enabled. This can be disabled by setting the - // environment variable "GRPC_XDS_EXPERIMENTAL_ENABLE_TIMEOUT" to "false". - TimeoutSupport = !strings.EqualFold(os.Getenv(timeoutSupportEnv), "false") - // FaultInjectionSupport is used to control both fault injection and HTTP - // filter support. 
- FaultInjectionSupport = !strings.EqualFold(os.Getenv(faultInjectionSupportEnv), "false") - // C2PResolverSupport indicates whether support for C2P resolver is enabled. - // This can be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true". - C2PResolverSupport = strings.EqualFold(os.Getenv(c2pResolverSupportEnv), "true") - // ClientSideSecuritySupport is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - ClientSideSecuritySupport = strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "true") - // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. - C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) -) diff --git a/internal/xds/matcher/matcher_header.go b/internal/xds/matcher/matcher_header.go new file mode 100644 index 000000000000..01433f4122a2 --- /dev/null +++ b/internal/xds/matcher/matcher_header.go @@ -0,0 +1,274 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package matcher + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/metadata" +) + +// HeaderMatcher is an interface for header matchers. These are +// documented in Envoy's config.route.v3.HeaderMatcher proto. They match on +// different aspects of HTTP header name/value pairs. +type HeaderMatcher interface { + Match(metadata.MD) bool + String() string +} + +// mdValuesFromOutgoingCtx retrieves the values for key from md. If there are +// multiple values, the values are concatenated with "," (comma and no space). +// +// All header matchers only match against the comma-concatenated string. +func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { + vs, ok := md[key] + if !ok { + return "", false + } + return strings.Join(vs, ","), true +} + +// HeaderExactMatcher matches on an exact match of the value of the header. +type HeaderExactMatcher struct { + key string + exact string + invert bool +} + +// NewHeaderExactMatcher returns a new HeaderExactMatcher. +func NewHeaderExactMatcher(key, exact string, invert bool) *HeaderExactMatcher { + return &HeaderExactMatcher{key: key, exact: exact, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderExactMatcher. +func (hem *HeaderExactMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hem.key) + if !ok { + return false + } + return (v == hem.exact) != hem.invert +} + +func (hem *HeaderExactMatcher) String() string { + return fmt.Sprintf("headerExact:%v:%v", hem.key, hem.exact) +} + +// HeaderRegexMatcher matches on whether the entire request header value matches +// the regex. +type HeaderRegexMatcher struct { + key string + re *regexp.Regexp + invert bool +} + +// NewHeaderRegexMatcher returns a new HeaderRegexMatcher. 
+func NewHeaderRegexMatcher(key string, re *regexp.Regexp, invert bool) *HeaderRegexMatcher { + return &HeaderRegexMatcher{key: key, re: re, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderRegexMatcher. +func (hrm *HeaderRegexMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) + if !ok { + return false + } + return grpcutil.FullMatchWithRegex(hrm.re, v) != hrm.invert +} + +func (hrm *HeaderRegexMatcher) String() string { + return fmt.Sprintf("headerRegex:%v:%v", hrm.key, hrm.re.String()) +} + +// HeaderRangeMatcher matches on whether the request header value is within the +// range. The header value must be an integer in base 10 notation. +type HeaderRangeMatcher struct { + key string + start, end int64 // represents [start, end). + invert bool +} + +// NewHeaderRangeMatcher returns a new HeaderRangeMatcher. +func NewHeaderRangeMatcher(key string, start, end int64, invert bool) *HeaderRangeMatcher { + return &HeaderRangeMatcher{key: key, start: start, end: end, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderRangeMatcher. +func (hrm *HeaderRangeMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hrm.key) + if !ok { + return false + } + if i, err := strconv.ParseInt(v, 10, 64); err == nil && i >= hrm.start && i < hrm.end { + return !hrm.invert + } + return hrm.invert +} + +func (hrm *HeaderRangeMatcher) String() string { + return fmt.Sprintf("headerRange:%v:[%d,%d)", hrm.key, hrm.start, hrm.end) +} + +// HeaderPresentMatcher will match based on whether the header is present in the +// whole request. +type HeaderPresentMatcher struct { + key string + present bool +} + +// NewHeaderPresentMatcher returns a new HeaderPresentMatcher. 
+func NewHeaderPresentMatcher(key string, present bool, invert bool) *HeaderPresentMatcher { + if invert { + present = !present + } + return &HeaderPresentMatcher{key: key, present: present} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderPresentMatcher. +func (hpm *HeaderPresentMatcher) Match(md metadata.MD) bool { + vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) + present := ok && len(vs) > 0 // TODO: Are we sure we need this len(vs) > 0? + return present == hpm.present +} + +func (hpm *HeaderPresentMatcher) String() string { + return fmt.Sprintf("headerPresent:%v:%v", hpm.key, hpm.present) +} + +// HeaderPrefixMatcher matches on whether the prefix of the header value matches +// the prefix passed into this struct. +type HeaderPrefixMatcher struct { + key string + prefix string + invert bool +} + +// NewHeaderPrefixMatcher returns a new HeaderPrefixMatcher. +func NewHeaderPrefixMatcher(key string, prefix string, invert bool) *HeaderPrefixMatcher { + return &HeaderPrefixMatcher{key: key, prefix: prefix, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderPrefixMatcher. +func (hpm *HeaderPrefixMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hpm.key) + if !ok { + return false + } + return strings.HasPrefix(v, hpm.prefix) != hpm.invert +} + +func (hpm *HeaderPrefixMatcher) String() string { + return fmt.Sprintf("headerPrefix:%v:%v", hpm.key, hpm.prefix) +} + +// HeaderSuffixMatcher matches on whether the suffix of the header value matches +// the suffix passed into this struct. +type HeaderSuffixMatcher struct { + key string + suffix string + invert bool +} + +// NewHeaderSuffixMatcher returns a new HeaderSuffixMatcher. 
+func NewHeaderSuffixMatcher(key string, suffix string, invert bool) *HeaderSuffixMatcher { + return &HeaderSuffixMatcher{key: key, suffix: suffix, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderSuffixMatcher. +func (hsm *HeaderSuffixMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) + if !ok { + return false + } + return strings.HasSuffix(v, hsm.suffix) != hsm.invert +} + +func (hsm *HeaderSuffixMatcher) String() string { + return fmt.Sprintf("headerSuffix:%v:%v", hsm.key, hsm.suffix) +} + +// HeaderContainsMatcher matches on whether the header value contains the +// value passed into this struct. +type HeaderContainsMatcher struct { + key string + contains string + invert bool +} + +// NewHeaderContainsMatcher returns a new HeaderContainsMatcher. key is the HTTP +// Header key to match on, and contains is the value that the header should +// should contain for a successful match. An empty contains string does not +// work, use HeaderPresentMatcher in that case. +func NewHeaderContainsMatcher(key string, contains string, invert bool) *HeaderContainsMatcher { + return &HeaderContainsMatcher{key: key, contains: contains, invert: invert} +} + +// Match returns whether the passed in HTTP Headers match according to the +// HeaderContainsMatcher. +func (hcm *HeaderContainsMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hcm.key) + if !ok { + return false + } + return strings.Contains(v, hcm.contains) != hcm.invert +} + +func (hcm *HeaderContainsMatcher) String() string { + return fmt.Sprintf("headerContains:%v%v", hcm.key, hcm.contains) +} + +// HeaderStringMatcher matches on whether the header value matches against the +// StringMatcher specified. +type HeaderStringMatcher struct { + key string + stringMatcher StringMatcher + invert bool +} + +// NewHeaderStringMatcher returns a new HeaderStringMatcher. 
+func NewHeaderStringMatcher(key string, sm StringMatcher, invert bool) *HeaderStringMatcher { + return &HeaderStringMatcher{ + key: key, + stringMatcher: sm, + invert: invert, + } +} + +// Match returns whether the passed in HTTP Headers match according to the +// specified StringMatcher. +func (hsm *HeaderStringMatcher) Match(md metadata.MD) bool { + v, ok := mdValuesFromOutgoingCtx(md, hsm.key) + if !ok { + return false + } + return hsm.stringMatcher.Match(v) != hsm.invert +} + +func (hsm *HeaderStringMatcher) String() string { + return fmt.Sprintf("headerString:%v:%v", hsm.key, hsm.stringMatcher) +} diff --git a/xds/internal/resolver/matcher_header_test.go b/internal/xds/matcher/matcher_header_test.go similarity index 51% rename from xds/internal/resolver/matcher_header_test.go rename to internal/xds/matcher/matcher_header_test.go index fb87cc5dd329..9a20cf12b0f9 100644 --- a/xds/internal/resolver/matcher_header_test.go +++ b/internal/xds/matcher/matcher_header_test.go @@ -16,7 +16,7 @@ * */ -package resolver +package matcher import ( "regexp" @@ -31,6 +31,7 @@ func TestHeaderExactMatcherMatch(t *testing.T) { key, exact string md metadata.MD want bool + invert bool }{ { name: "one value one match", @@ -61,11 +62,35 @@ func TestHeaderExactMatcherMatch(t *testing.T) { md: metadata.Pairs("th", "abc"), want: false, }, + { + name: "invert header not present", + key: "th", + exact: "tv", + md: metadata.Pairs(":method", "GET"), + want: false, + invert: true, + }, + { + name: "invert header match", + key: "th", + exact: "tv", + md: metadata.Pairs("th", "tv"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + exact: "tv", + md: metadata.Pairs("th", "tvv"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hem := newHeaderExactMatcher(tt.key, tt.exact) - if got := hem.match(tt.md); got != tt.want { + hem := NewHeaderExactMatcher(tt.key, tt.exact, tt.invert) + if got := 
hem.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -78,6 +103,7 @@ func TestHeaderRegexMatcherMatch(t *testing.T) { key, regexStr string md metadata.MD want bool + invert bool }{ { name: "one value one match", @@ -107,11 +133,49 @@ func TestHeaderRegexMatcherMatch(t *testing.T) { md: metadata.Pairs("th", "abc"), want: false, }, + { + name: "no match because only part of value matches with regex", + key: "header", + regexStr: "^a+$", + md: metadata.Pairs("header", "ab"), + want: false, + }, + { + name: "match because full value matches with regex", + key: "header", + regexStr: "^a+$", + md: metadata.Pairs("header", "aa"), + want: true, + }, + { + name: "invert header not present", + key: "th", + regexStr: "^t+v*$", + md: metadata.Pairs(":method", "GET"), + want: false, + invert: true, + }, + { + name: "invert header match", + key: "th", + regexStr: "^t+v*$", + md: metadata.Pairs("th", "tttvv"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + regexStr: "^t+v*$", + md: metadata.Pairs("th", "abc"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hrm := newHeaderRegexMatcher(tt.key, regexp.MustCompile(tt.regexStr)) - if got := hrm.match(tt.md); got != tt.want { + hrm := NewHeaderRegexMatcher(tt.key, regexp.MustCompile(tt.regexStr), tt.invert) + if got := hrm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -125,6 +189,7 @@ func TestHeaderRangeMatcherMatch(t *testing.T) { start, end int64 md metadata.MD want bool + invert bool }{ { name: "match", @@ -154,11 +219,35 @@ func TestHeaderRangeMatcherMatch(t *testing.T) { md: metadata.Pairs("th", "-5"), want: true, }, + { + name: "invert header not present", + key: "th", + start: 1, end: 10, + md: metadata.Pairs(":method", "GET"), + want: false, + invert: true, + }, + { + name: "invert header match", + key: "th", + start: 1, end: 10, + md: 
metadata.Pairs("th", "5"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + start: 1, end: 9, + md: metadata.Pairs("th", "10"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hrm := newHeaderRangeMatcher(tt.key, tt.start, tt.end) - if got := hrm.match(tt.md); got != tt.want { + hrm := NewHeaderRangeMatcher(tt.key, tt.start, tt.end, tt.invert) + if got := hrm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -172,6 +261,7 @@ func TestHeaderPresentMatcherMatch(t *testing.T) { present bool md metadata.MD want bool + invert bool }{ { name: "want present is present", @@ -201,11 +291,35 @@ func TestHeaderPresentMatcherMatch(t *testing.T) { md: metadata.Pairs("abc", "tv"), want: true, }, + { + name: "invert header not present", + key: "th", + present: true, + md: metadata.Pairs(":method", "GET"), + want: true, + invert: true, + }, + { + name: "invert header match", + key: "th", + present: true, + md: metadata.Pairs("th", "tv"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + present: true, + md: metadata.Pairs(":method", "GET"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hpm := newHeaderPresentMatcher(tt.key, tt.present) - if got := hpm.match(tt.md); got != tt.want { + hpm := NewHeaderPresentMatcher(tt.key, tt.present, tt.invert) + if got := hpm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -218,6 +332,7 @@ func TestHeaderPrefixMatcherMatch(t *testing.T) { key, prefix string md metadata.MD want bool + invert bool }{ { name: "one value one match", @@ -247,11 +362,35 @@ func TestHeaderPrefixMatcherMatch(t *testing.T) { md: metadata.Pairs("th", "abc"), want: false, }, + { + name: "invert header not present", + key: "th", + prefix: "tv", + md: metadata.Pairs(":method", "GET"), + want: false, + invert: 
true, + }, + { + name: "invert header match", + key: "th", + prefix: "tv", + md: metadata.Pairs("th", "tv123"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + prefix: "tv", + md: metadata.Pairs("th", "abc"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hpm := newHeaderPrefixMatcher(tt.key, tt.prefix) - if got := hpm.match(tt.md); got != tt.want { + hpm := NewHeaderPrefixMatcher(tt.key, tt.prefix, tt.invert) + if got := hpm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) @@ -264,6 +403,7 @@ func TestHeaderSuffixMatcherMatch(t *testing.T) { key, suffix string md metadata.MD want bool + invert bool }{ { name: "one value one match", @@ -293,40 +433,116 @@ func TestHeaderSuffixMatcherMatch(t *testing.T) { md: metadata.Pairs("th", "abc"), want: false, }, + { + name: "invert header not present", + key: "th", + suffix: "tv", + md: metadata.Pairs(":method", "GET"), + want: false, + invert: true, + }, + { + name: "invert header match", + key: "th", + suffix: "tv", + md: metadata.Pairs("th", "123tv"), + want: false, + invert: true, + }, + { + name: "invert header not match", + key: "th", + suffix: "tv", + md: metadata.Pairs("th", "abc"), + want: true, + invert: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - hsm := newHeaderSuffixMatcher(tt.key, tt.suffix) - if got := hsm.match(tt.md); got != tt.want { + hsm := NewHeaderSuffixMatcher(tt.key, tt.suffix, tt.invert) + if got := hsm.Match(tt.md); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) } } -func TestInvertMatcherMatch(t *testing.T) { +func TestHeaderStringMatch(t *testing.T) { tests := []struct { - name string - m headerMatcherInterface - md metadata.MD + name string + key string + sm StringMatcher + invert bool + md metadata.MD + want bool }{ { - name: "true->false", - m: newHeaderExactMatcher("th", "tv"), - md: metadata.Pairs("th", 
"tv"), + name: "should-match", + key: "th", + sm: StringMatcher{ + exactMatch: newStringP("tv"), + }, + invert: false, + md: metadata.Pairs("th", "tv"), + want: true, + }, + { + name: "not match", + key: "th", + sm: StringMatcher{ + containsMatch: newStringP("tv"), + }, + invert: false, + md: metadata.Pairs("th", "not-match"), + want: false, + }, + { + name: "invert string match", + key: "th", + sm: StringMatcher{ + containsMatch: newStringP("tv"), + }, + invert: true, + md: metadata.Pairs("th", "not-match"), + want: true, + }, + { + name: "header missing", + key: "th", + sm: StringMatcher{ + containsMatch: newStringP("tv"), + }, + invert: false, + md: metadata.Pairs("not-specified-key", "not-match"), + want: false, }, { - name: "false->true", - m: newHeaderExactMatcher("th", "abc"), - md: metadata.Pairs("th", "tv"), + name: "header missing invert true", + key: "th", + sm: StringMatcher{ + containsMatch: newStringP("tv"), + }, + invert: true, + md: metadata.Pairs("not-specified-key", "not-match"), + want: false, + }, + { + name: "header empty string invert", + key: "th", + sm: StringMatcher{ + containsMatch: newStringP("tv"), + }, + invert: true, + md: metadata.Pairs("th", ""), + want: true, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := newInvertMatcher(tt.m).match(tt.md) - want := !tt.m.match(tt.md) - if got != want { - t.Errorf("match() = %v, want %v", got, want) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hsm := NewHeaderStringMatcher(test.key, test.sm, test.invert) + if got := hsm.Match(test.md); got != test.want { + t.Errorf("match() = %v, want %v", got, test.want) } }) } diff --git a/internal/xds/string_matcher.go b/internal/xds/matcher/string_matcher.go similarity index 96% rename from internal/xds/string_matcher.go rename to internal/xds/matcher/string_matcher.go index 21f15aad1b88..c138f78735bc 100644 --- a/internal/xds/string_matcher.go +++ b/internal/xds/matcher/string_matcher.go @@ -16,9 
+16,9 @@ * */ -// Package xds contains types that need to be shared between code under +// Package matcher contains types that need to be shared between code under // google.golang.org/grpc/xds/... and the rest of gRPC. -package xds +package matcher import ( "errors" @@ -27,6 +27,7 @@ import ( "strings" v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + "google.golang.org/grpc/internal/grpcutil" ) // StringMatcher contains match criteria for matching a string, and is an @@ -58,7 +59,7 @@ func (sm StringMatcher) Match(input string) bool { case sm.suffixMatch != nil: return strings.HasSuffix(input, *sm.suffixMatch) case sm.regexMatch != nil: - return sm.regexMatch.MatchString(input) + return grpcutil.FullMatchWithRegex(sm.regexMatch, input) case sm.containsMatch != nil: return strings.Contains(input, *sm.containsMatch) } diff --git a/internal/xds/string_matcher_test.go b/internal/xds/matcher/string_matcher_test.go similarity index 97% rename from internal/xds/string_matcher_test.go rename to internal/xds/matcher/string_matcher_test.go index 7908ac974b23..9528b57e44a5 100644 --- a/internal/xds/string_matcher_test.go +++ b/internal/xds/matcher/string_matcher_test.go @@ -16,7 +16,7 @@ * */ -package xds +package matcher import ( "regexp" @@ -67,13 +67,6 @@ func TestStringMatcherFromProto(t *testing.T) { }, wantErr: true, }, - { - desc: "invalid deprecated regex", - inputProto: &v3matcherpb.StringMatcher{ - MatchPattern: &v3matcherpb.StringMatcher_HiddenEnvoyDeprecatedRegex{}, - }, - wantErr: true, - }, { desc: "happy case exact", inputProto: &v3matcherpb.StringMatcher{ @@ -273,6 +266,12 @@ func TestMatch(t *testing.T) { input: "goodregex", wantMatch: true, }, + { + desc: "regex match failure because only part match", + matcher: regexMatcher, + input: "goodregexa", + wantMatch: false, + }, { desc: "regex match failure", matcher: regexMatcher, diff --git a/internal/xds/rbac/converter.go b/internal/xds/rbac/converter.go new file mode 
100644 index 000000000000..713e39cf31cb --- /dev/null +++ b/internal/xds/rbac/converter.go @@ -0,0 +1,101 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rbac + +import ( + "encoding/json" + "fmt" + "strings" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3auditloggersstreampb "github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/authz/audit/stdout" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" +) + +func buildLogger(loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig) (audit.Logger, error) { + if loggerConfig.GetAuditLogger().GetTypedConfig() == nil { + return nil, fmt.Errorf("missing required field: TypedConfig") + } + customConfig, loggerName, err := getCustomConfig(loggerConfig.AuditLogger.TypedConfig) + if err != nil { + return nil, err + } + if loggerName == "" { + return nil, fmt.Errorf("field TypedConfig.TypeURL cannot be an empty string") + } + factory := audit.GetLoggerBuilder(loggerName) + if factory == nil { + if loggerConfig.IsOptional { + return nil, nil + } + return nil, 
fmt.Errorf("no builder registered for %v", loggerName) + } + auditLoggerConfig, err := factory.ParseLoggerConfig(customConfig) + if err != nil { + return nil, fmt.Errorf("custom config could not be parsed by registered factory. error: %v", err) + } + auditLogger := factory.Build(auditLoggerConfig) + return auditLogger, nil +} + +func getCustomConfig(config *anypb.Any) (json.RawMessage, string, error) { + any, err := config.UnmarshalNew() + if err != nil { + return nil, "", err + } + switch m := any.(type) { + case *v1xdsudpatypepb.TypedStruct: + return convertCustomConfig(m.TypeUrl, m.Value) + case *v3xdsxdstypepb.TypedStruct: + return convertCustomConfig(m.TypeUrl, m.Value) + case *v3auditloggersstreampb.StdoutAuditLog: + return convertStdoutConfig(m) + } + return nil, "", fmt.Errorf("custom config not implemented for type [%v]", config.GetTypeUrl()) +} + +func convertStdoutConfig(config *v3auditloggersstreampb.StdoutAuditLog) (json.RawMessage, string, error) { + json, err := protojson.Marshal(config) + return json, stdout.Name, err +} + +func convertCustomConfig(typeURL string, s *structpb.Struct) (json.RawMessage, string, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. 
+ urls := strings.Split(typeURL, "/") + if len(urls) == 0 { + return nil, "", fmt.Errorf("error converting custom audit logger %v for %v: typeURL must have a url-like format with the typeName being the value after the last /", typeURL, s) + } + name := urls[len(urls)-1] + + rawJSON := []byte("{}") + var err error + if s != nil { + rawJSON, err = json.Marshal(s) + if err != nil { + return nil, "", fmt.Errorf("error converting custom audit logger %v for %v: %v", typeURL, s, err) + } + } + return rawJSON, name, nil +} diff --git a/internal/xds/rbac/converter_test.go b/internal/xds/rbac/converter_test.go new file mode 100644 index 000000000000..9b8004f7bd5c --- /dev/null +++ b/internal/xds/rbac/converter_test.go @@ -0,0 +1,170 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package rbac + +import ( + "reflect" + "strings" + "testing" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3auditloggersstreampb "github.com/envoyproxy/go-control-plane/envoy/extensions/rbac/audit_loggers/stream/v3" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/authz/audit/stdout" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/protobuf/types/known/anypb" +) + +func (s) TestBuildLoggerErrors(t *testing.T) { + tests := []struct { + name string + loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig + expectedLogger audit.Logger + expectedError string + }{ + { + name: "nil typed config", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + TypedConfig: nil, + }, + }, + expectedError: "missing required field: TypedConfig", + }, + { + name: "Unsupported Type", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: testutils.MarshalAny(&v3rbacpb.RBAC_AuditLoggingOptions{}), + }, + }, + expectedError: "custom config not implemented for type ", + }, + { + name: "Empty name", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, ""), + }, + }, + expectedError: "field TypedConfig.TypeURL cannot be an empty string", + }, + { + name: "No registered logger", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "UnregisteredLogger", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "UnregisteredLogger"), + }, + IsOptional: false, + }, + expectedError: "no 
builder registered for UnregisteredLogger", + }, + { + name: "fail to parse custom config", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{"abc": "BADVALUE", "xyz": "123"}, "fail to parse custom config_TestAuditLoggerCustomConfig")}, + IsOptional: false, + }, + expectedError: "custom config could not be parsed", + }, + { + name: "no registered logger but optional passes", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "UnregisteredLogger", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "no registered logger but optional passes_UnregisteredLogger"), + }, + IsOptional: true, + }, + expectedLogger: nil, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b := TestAuditLoggerCustomConfigBuilder{testName: test.name} + audit.RegisterLoggerBuilder(&b) + logger, err := buildLogger(test.loggerConfig) + if err != nil && !strings.HasPrefix(err.Error(), test.expectedError) { + t.Fatalf("expected error: %v. got error: %v", test.expectedError, err) + } + if logger != test.expectedLogger { + t.Fatalf("expected logger: %v. 
got logger: %v", test.expectedLogger, logger) + } + + }) + } +} + +func (s) TestBuildLoggerKnownTypes(t *testing.T) { + tests := []struct { + name string + loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig + expectedType reflect.Type + }{ + { + name: "stdout logger", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: stdout.Name, + TypedConfig: createStdoutPb(t), + }, + IsOptional: false, + }, + expectedType: reflect.TypeOf(audit.GetLoggerBuilder(stdout.Name).Build(nil)), + }, + { + name: "stdout logger with generic TypedConfig", + loggerConfig: &v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: stdout.Name, + TypedConfig: createXDSTypedStruct(t, map[string]interface{}{}, stdout.Name), + }, + IsOptional: false, + }, + expectedType: reflect.TypeOf(audit.GetLoggerBuilder(stdout.Name).Build(nil)), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + logger, err := buildLogger(test.loggerConfig) + if err != nil { + t.Fatalf("expected success. got error: %v", err) + } + loggerType := reflect.TypeOf(logger) + if test.expectedType != loggerType { + t.Fatalf("logger not of expected type. want: %v got: %v", test.expectedType, loggerType) + } + }) + } +} + +// Builds stdout config for audit logger proto. +func createStdoutPb(t *testing.T) *anypb.Any { + t.Helper() + pb := &v3auditloggersstreampb.StdoutAuditLog{} + customConfig, err := anypb.New(pb) + if err != nil { + t.Fatalf("createStdoutPb failed during anypb.New: %v", err) + } + return customConfig +} diff --git a/internal/xds/rbac/matchers.go b/internal/xds/rbac/matchers.go new file mode 100644 index 000000000000..c9f71d32cbb2 --- /dev/null +++ b/internal/xds/rbac/matchers.go @@ -0,0 +1,432 @@ +/* + * Copyright 2021 gRPC authors. 
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package rbac

import (
	"errors"
	"fmt"
	"net"
	"regexp"

	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
	v3route_componentspb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
	v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
	internalmatcher "google.golang.org/grpc/internal/xds/matcher"
)

// matcher is an interface that takes data about incoming RPC's and returns
// whether it matches with whatever matcher implements this interface.
type matcher interface {
	match(data *rpcData) bool
}

// policyMatcher helps determine whether an incoming RPC call matches a policy.
// A policy is a logical role (e.g. Service Admin), which is comprised of
// permissions and principals. A principal is an identity (or identities) for a
// downstream subject which are assigned the policy (role), and a permission is
// an action(s) that a principal(s) can take. A policy matches if both a
// permission and a principal match, which will be determined by the child
// permissions and principals matchers. policyMatcher implements the matcher
// interface.
type policyMatcher struct {
	permissions *orMatcher
	principals  *orMatcher
}

// newPolicyMatcher constructs a policyMatcher from an RBAC policy proto. It
// returns an error if any of the policy's permissions or principals cannot be
// converted into a matcher.
func newPolicyMatcher(policy *v3rbacpb.Policy) (*policyMatcher, error) {
	permissions, err := matchersFromPermissions(policy.Permissions)
	if err != nil {
		return nil, err
	}
	principals, err := matchersFromPrincipals(policy.Principals)
	if err != nil {
		return nil, err
	}
	return &policyMatcher{
		permissions: &orMatcher{matchers: permissions},
		principals:  &orMatcher{matchers: principals},
	}, nil
}

// match returns whether data about the incoming RPC matches this policy.
func (pm *policyMatcher) match(data *rpcData) bool {
	// A policy matches if and only if at least one of its permissions match
	// the action taking place AND at least one of its principals match the
	// downstream peer.
	return pm.permissions.match(data) && pm.principals.match(data)
}

// matchersFromPermissions takes a list of permissions (can also be
// a single permission, e.g. from a not matcher which is logically !permission)
// and returns a list of matchers which correspond to that permission. This will
// be called in many instances throughout the initial construction of the RBAC
// engine from the AND and OR matchers and also from the NOT matcher.
func matchersFromPermissions(permissions []*v3rbacpb.Permission) ([]matcher, error) {
	var matchers []matcher
	for _, permission := range permissions {
		switch permission.GetRule().(type) {
		case *v3rbacpb.Permission_AndRules:
			mList, err := matchersFromPermissions(permission.GetAndRules().Rules)
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, &andMatcher{matchers: mList})
		case *v3rbacpb.Permission_OrRules:
			mList, err := matchersFromPermissions(permission.GetOrRules().Rules)
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, &orMatcher{matchers: mList})
		case *v3rbacpb.Permission_Any:
			matchers = append(matchers, &alwaysMatcher{})
		case *v3rbacpb.Permission_Header:
			m, err := newHeaderMatcher(permission.GetHeader())
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, m)
		case *v3rbacpb.Permission_UrlPath:
			m, err := newURLPathMatcher(permission.GetUrlPath())
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, m)
		case *v3rbacpb.Permission_DestinationIp:
			// Due to this being on server side, the destination IP is the
			// local IP.
			m, err := newLocalIPMatcher(permission.GetDestinationIp())
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, m)
		case *v3rbacpb.Permission_DestinationPort:
			matchers = append(matchers, newPortMatcher(permission.GetDestinationPort()))
		case *v3rbacpb.Permission_NotRule:
			// Wrap the single negated rule in a matcher and invert it.
			mList, err := matchersFromPermissions([]*v3rbacpb.Permission{{Rule: permission.GetNotRule().Rule}})
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, &notMatcher{matcherToNot: mList[0]})
		case *v3rbacpb.Permission_Metadata:
			// Metadata never matches in gRPC: a non-inverted metadata
			// permission is a no-op (matches nothing), and an inverted one
			// always matches.
			if permission.GetMetadata().GetInvert() {
				matchers = append(matchers, &alwaysMatcher{})
			}
		case *v3rbacpb.Permission_RequestedServerName:
			// Not supported in gRPC RBAC currently - a permission typed as
			// requested server name in the initial config will be a no-op.
		}
	}
	return matchers, nil
}

// matchersFromPrincipals is the principal analogue of
// matchersFromPermissions: it converts a list of principals into the matchers
// that implement them.
func matchersFromPrincipals(principals []*v3rbacpb.Principal) ([]matcher, error) {
	var matchers []matcher
	for _, principal := range principals {
		switch principal.GetIdentifier().(type) {
		case *v3rbacpb.Principal_AndIds:
			mList, err := matchersFromPrincipals(principal.GetAndIds().Ids)
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, &andMatcher{matchers: mList})
		case *v3rbacpb.Principal_OrIds:
			mList, err := matchersFromPrincipals(principal.GetOrIds().Ids)
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, &orMatcher{matchers: mList})
		case *v3rbacpb.Principal_Any:
			matchers = append(matchers, &alwaysMatcher{})
		case *v3rbacpb.Principal_Authenticated_:
			authenticatedMatcher, err := newAuthenticatedMatcher(principal.GetAuthenticated())
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, authenticatedMatcher)
		case *v3rbacpb.Principal_DirectRemoteIp:
			m, err := newRemoteIPMatcher(principal.GetDirectRemoteIp())
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, m)
		case *v3rbacpb.Principal_Header:
			m, err := newHeaderMatcher(principal.GetHeader())
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, m)
		case *v3rbacpb.Principal_UrlPath:
			m, err := newURLPathMatcher(principal.GetUrlPath())
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, m)
		case *v3rbacpb.Principal_NotId:
			mList, err := matchersFromPrincipals([]*v3rbacpb.Principal{{Identifier: principal.GetNotId().Identifier}})
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, &notMatcher{matcherToNot: mList[0]})
		case *v3rbacpb.Principal_SourceIp:
			// The source ip principal identifier is deprecated. Thus, a
			// principal typed as a source ip in the identifier will be a
			// no-op. The config should use DirectRemoteIp instead.
		case *v3rbacpb.Principal_RemoteIp:
			// RBAC in gRPC treats direct_remote_ip and remote_ip as logically
			// equivalent, as per A41.
			m, err := newRemoteIPMatcher(principal.GetRemoteIp())
			if err != nil {
				return nil, err
			}
			matchers = append(matchers, m)
		case *v3rbacpb.Principal_Metadata:
			// Not supported in gRPC RBAC currently - a principal typed as
			// Metadata in the initial config will be a no-op.
		}
	}
	return matchers, nil
}

// orMatcher is a matcher that matches successfully if at least one of its
// children matches. It logically represents a principal or permission list,
// but can also be its own entity further down the tree of matchers. orMatcher
// implements the matcher interface.
type orMatcher struct {
	matchers []matcher
}

func (om *orMatcher) match(data *rpcData) bool {
	// Range through child matchers and pass in data about incoming RPC, and
	// only one child matcher has to match to be logically successful.
	for _, m := range om.matchers {
		if m.match(data) {
			return true
		}
	}
	return false
}

// andMatcher is a matcher that is successful if every child matcher
// matches. andMatcher implements the matcher interface.
+type andMatcher struct { + matchers []matcher +} + +func (am *andMatcher) match(data *rpcData) bool { + for _, m := range am.matchers { + if !m.match(data) { + return false + } + } + return true +} + +// alwaysMatcher is a matcher that will always match. This logically +// represents an any rule for a permission or a principal. alwaysMatcher +// implements the matcher interface. +type alwaysMatcher struct { +} + +func (am *alwaysMatcher) match(data *rpcData) bool { + return true +} + +// notMatcher is a matcher that nots an underlying matcher. notMatcher +// implements the matcher interface. +type notMatcher struct { + matcherToNot matcher +} + +func (nm *notMatcher) match(data *rpcData) bool { + return !nm.matcherToNot.match(data) +} + +// headerMatcher is a matcher that matches on incoming HTTP Headers present +// in the incoming RPC. headerMatcher implements the matcher interface. +type headerMatcher struct { + matcher internalmatcher.HeaderMatcher +} + +func newHeaderMatcher(headerMatcherConfig *v3route_componentspb.HeaderMatcher) (*headerMatcher, error) { + var m internalmatcher.HeaderMatcher + switch headerMatcherConfig.HeaderMatchSpecifier.(type) { + case *v3route_componentspb.HeaderMatcher_ExactMatch: + m = internalmatcher.NewHeaderExactMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetExactMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_SafeRegexMatch: + regex, err := regexp.Compile(headerMatcherConfig.GetSafeRegexMatch().Regex) + if err != nil { + return nil, err + } + m = internalmatcher.NewHeaderRegexMatcher(headerMatcherConfig.Name, regex, headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_RangeMatch: + m = internalmatcher.NewHeaderRangeMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetRangeMatch().Start, headerMatcherConfig.GetRangeMatch().End, headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_PresentMatch: + m = 
internalmatcher.NewHeaderPresentMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPresentMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_PrefixMatch: + m = internalmatcher.NewHeaderPrefixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetPrefixMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_SuffixMatch: + m = internalmatcher.NewHeaderSuffixMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetSuffixMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_ContainsMatch: + m = internalmatcher.NewHeaderContainsMatcher(headerMatcherConfig.Name, headerMatcherConfig.GetContainsMatch(), headerMatcherConfig.InvertMatch) + case *v3route_componentspb.HeaderMatcher_StringMatch: + sm, err := internalmatcher.StringMatcherFromProto(headerMatcherConfig.GetStringMatch()) + if err != nil { + return nil, fmt.Errorf("invalid string matcher %+v: %v", headerMatcherConfig.GetStringMatch(), err) + } + m = internalmatcher.NewHeaderStringMatcher(headerMatcherConfig.Name, sm, headerMatcherConfig.InvertMatch) + default: + return nil, errors.New("unknown header matcher type") + } + return &headerMatcher{matcher: m}, nil +} + +func (hm *headerMatcher) match(data *rpcData) bool { + return hm.matcher.Match(data.md) +} + +// urlPathMatcher matches on the URL Path of the incoming RPC. In gRPC, this +// logically maps to the full method name the RPC is calling on the server side. +// urlPathMatcher implements the matcher interface. 
+type urlPathMatcher struct { + stringMatcher internalmatcher.StringMatcher +} + +func newURLPathMatcher(pathMatcher *v3matcherpb.PathMatcher) (*urlPathMatcher, error) { + stringMatcher, err := internalmatcher.StringMatcherFromProto(pathMatcher.GetPath()) + if err != nil { + return nil, err + } + return &urlPathMatcher{stringMatcher: stringMatcher}, nil +} + +func (upm *urlPathMatcher) match(data *rpcData) bool { + return upm.stringMatcher.Match(data.fullMethod) +} + +// remoteIPMatcher and localIPMatcher both are matchers that match against +// a CIDR Range. Two different matchers are needed as the remote and destination +// ip addresses come from different parts of the data about incoming RPC's +// passed in. Matching a CIDR Range means to determine whether the IP Address +// falls within the CIDR Range or not. They both implement the matcher +// interface. +type remoteIPMatcher struct { + // ipNet represents the CidrRange that this matcher was configured with. + // This is what will remote and destination IP's will be matched against. + ipNet *net.IPNet +} + +func newRemoteIPMatcher(cidrRange *v3corepb.CidrRange) (*remoteIPMatcher, error) { + // Convert configuration to a cidrRangeString, as Go standard library has + // methods that parse cidr string. 
+ cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) + _, ipNet, err := net.ParseCIDR(cidrRangeString) + if err != nil { + return nil, err + } + return &remoteIPMatcher{ipNet: ipNet}, nil +} + +func (sim *remoteIPMatcher) match(data *rpcData) bool { + return sim.ipNet.Contains(net.IP(net.ParseIP(data.peerInfo.Addr.String()))) +} + +type localIPMatcher struct { + ipNet *net.IPNet +} + +func newLocalIPMatcher(cidrRange *v3corepb.CidrRange) (*localIPMatcher, error) { + cidrRangeString := fmt.Sprintf("%s/%d", cidrRange.AddressPrefix, cidrRange.PrefixLen.Value) + _, ipNet, err := net.ParseCIDR(cidrRangeString) + if err != nil { + return nil, err + } + return &localIPMatcher{ipNet: ipNet}, nil +} + +func (dim *localIPMatcher) match(data *rpcData) bool { + return dim.ipNet.Contains(net.IP(net.ParseIP(data.localAddr.String()))) +} + +// portMatcher matches on whether the destination port of the RPC matches the +// destination port this matcher was instantiated with. portMatcher +// implements the matcher interface. +type portMatcher struct { + destinationPort uint32 +} + +func newPortMatcher(destinationPort uint32) *portMatcher { + return &portMatcher{destinationPort: destinationPort} +} + +func (pm *portMatcher) match(data *rpcData) bool { + return data.destinationPort == pm.destinationPort +} + +// authenticatedMatcher matches on the name of the Principal. If set, the URI +// SAN or DNS SAN in that order is used from the certificate, otherwise the +// subject field is used. If unset, it applies to any user that is +// authenticated. authenticatedMatcher implements the matcher interface. 
+type authenticatedMatcher struct { + stringMatcher *internalmatcher.StringMatcher +} + +func newAuthenticatedMatcher(authenticatedMatcherConfig *v3rbacpb.Principal_Authenticated) (*authenticatedMatcher, error) { + // Represents this line in the RBAC documentation = "If unset, it applies to + // any user that is authenticated" (see package-level comments). + if authenticatedMatcherConfig.PrincipalName == nil { + return &authenticatedMatcher{}, nil + } + stringMatcher, err := internalmatcher.StringMatcherFromProto(authenticatedMatcherConfig.PrincipalName) + if err != nil { + return nil, err + } + return &authenticatedMatcher{stringMatcher: &stringMatcher}, nil +} + +func (am *authenticatedMatcher) match(data *rpcData) bool { + if data.authType != "tls" { + // Connection is not authenticated. + return false + } + if am.stringMatcher == nil { + // Allows any authenticated user. + return true + } + // "If there is no client certificate (thus no SAN nor Subject), check if "" + // (empty string) matches. If it matches, the principal_name is said to + // match" - A41 + if len(data.certs) == 0 { + return am.stringMatcher.Match("") + } + cert := data.certs[0] + // The order of matching as per the RBAC documentation (see package-level comments) + // is as follows: URI SANs, DNS SANs, and then subject name. + for _, uriSAN := range cert.URIs { + if am.stringMatcher.Match(uriSAN.String()) { + return true + } + } + for _, dnsSAN := range cert.DNSNames { + if am.stringMatcher.Match(dnsSAN) { + return true + } + } + return am.stringMatcher.Match(cert.Subject.String()) +} diff --git a/internal/xds/rbac/rbac_engine.go b/internal/xds/rbac/rbac_engine.go new file mode 100644 index 000000000000..63237affe23f --- /dev/null +++ b/internal/xds/rbac/rbac_engine.go @@ -0,0 +1,317 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package rbac provides service-level and method-level access control for a +// service. See +// https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/rbac/v3/rbac.proto#role-based-access-control-rbac +// for documentation. +package rbac + +import ( + "context" + "crypto/x509" + "errors" + "fmt" + "net" + "strconv" + + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +var logger = grpclog.Component("rbac") + +var getConnection = transport.GetConnection + +// ChainEngine represents a chain of RBAC Engines, used to make authorization +// decisions on incoming RPCs. +type ChainEngine struct { + chainedEngines []*engine +} + +// NewChainEngine returns a chain of RBAC engines, used to make authorization +// decisions on incoming RPCs. Returns a non-nil error for invalid policies. 
+func NewChainEngine(policies []*v3rbacpb.RBAC, policyName string) (*ChainEngine, error) { + engines := make([]*engine, 0, len(policies)) + for _, policy := range policies { + engine, err := newEngine(policy, policyName) + if err != nil { + return nil, err + } + engines = append(engines, engine) + } + return &ChainEngine{chainedEngines: engines}, nil +} + +func (cre *ChainEngine) logRequestDetails(rpcData *rpcData) { + if logger.V(2) { + logger.Infof("checking request: url path=%s", rpcData.fullMethod) + if len(rpcData.certs) > 0 { + cert := rpcData.certs[0] + logger.Infof("uri sans=%q, dns sans=%q, subject=%v", cert.URIs, cert.DNSNames, cert.Subject) + } + } +} + +// IsAuthorized determines if an incoming RPC is authorized based on the chain of RBAC +// engines and their associated actions. +// +// Errors returned by this function are compatible with the status package. +func (cre *ChainEngine) IsAuthorized(ctx context.Context) error { + // This conversion step (i.e. pulling things out of ctx) can be done once, + // and then be used for the whole chain of RBAC Engines. 
+ rpcData, err := newRPCData(ctx) + if err != nil { + logger.Errorf("newRPCData: %v", err) + return status.Errorf(codes.Internal, "gRPC RBAC: %v", err) + } + for _, engine := range cre.chainedEngines { + matchingPolicyName, ok := engine.findMatchingPolicy(rpcData) + if logger.V(2) && ok { + logger.Infof("incoming RPC matched to policy %v in engine with action %v", matchingPolicyName, engine.action) + } + + switch { + case engine.action == v3rbacpb.RBAC_ALLOW && !ok: + cre.logRequestDetails(rpcData) + engine.doAuditLogging(rpcData, matchingPolicyName, false) + return status.Errorf(codes.PermissionDenied, "incoming RPC did not match an allow policy") + case engine.action == v3rbacpb.RBAC_DENY && ok: + cre.logRequestDetails(rpcData) + engine.doAuditLogging(rpcData, matchingPolicyName, false) + return status.Errorf(codes.PermissionDenied, "incoming RPC matched a deny policy %q", matchingPolicyName) + } + // Every policy in the engine list must be queried. Thus, iterate to the + // next policy. + engine.doAuditLogging(rpcData, matchingPolicyName, true) + } + // If the incoming RPC gets through all of the engines successfully (i.e. + // doesn't not match an allow or match a deny engine), the RPC is authorized + // to proceed. + return nil +} + +// engine is used for matching incoming RPCs to policies. +type engine struct { + // TODO(gtcooke94) - differentiate between `policyName`, `policies`, and `rules` + policyName string + policies map[string]*policyMatcher + // action must be ALLOW or DENY. + action v3rbacpb.RBAC_Action + auditLoggers []audit.Logger + auditCondition v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition +} + +// newEngine creates an RBAC Engine based on the contents of a policy. Returns a +// non-nil error if the policy is invalid. 
+func newEngine(config *v3rbacpb.RBAC, policyName string) (*engine, error) { + a := config.GetAction() + if a != v3rbacpb.RBAC_ALLOW && a != v3rbacpb.RBAC_DENY { + return nil, fmt.Errorf("unsupported action %s", config.Action) + } + + policies := make(map[string]*policyMatcher, len(config.GetPolicies())) + for name, policy := range config.GetPolicies() { + matcher, err := newPolicyMatcher(policy) + if err != nil { + return nil, err + } + policies[name] = matcher + } + + auditLoggers, auditCondition, err := parseAuditOptions(config.GetAuditLoggingOptions()) + if err != nil { + return nil, err + } + return &engine{ + policyName: policyName, + policies: policies, + action: a, + auditLoggers: auditLoggers, + auditCondition: auditCondition, + }, nil +} + +func parseAuditOptions(opts *v3rbacpb.RBAC_AuditLoggingOptions) ([]audit.Logger, v3rbacpb.RBAC_AuditLoggingOptions_AuditCondition, error) { + if opts == nil { + return nil, v3rbacpb.RBAC_AuditLoggingOptions_NONE, nil + } + var auditLoggers []audit.Logger + for _, logger := range opts.LoggerConfigs { + auditLogger, err := buildLogger(logger) + if err != nil { + return nil, v3rbacpb.RBAC_AuditLoggingOptions_NONE, err + } + if auditLogger == nil { + // This occurs when the audit logger is not registered but also + // marked optional. + continue + } + auditLoggers = append(auditLoggers, auditLogger) + } + return auditLoggers, opts.GetAuditCondition(), nil + +} + +// findMatchingPolicy determines if an incoming RPC matches a policy. On a +// successful match, it returns the name of the matching policy and a true bool +// to specify that there was a matching policy found. It returns false in +// the case of not finding a matching policy. 
+func (e *engine) findMatchingPolicy(rpcData *rpcData) (string, bool) { + for policy, matcher := range e.policies { + if matcher.match(rpcData) { + return policy, true + } + } + return "", false +} + +// newRPCData takes an incoming context (should be a context representing state +// needed for server RPC Call with metadata, peer info (used for source ip/port +// and TLS information) and connection (used for destination ip/port) piped into +// it) and the method name of the Service being called server side and populates +// an rpcData struct ready to be passed to the RBAC Engine to find a matching +// policy. +func newRPCData(ctx context.Context) (*rpcData, error) { + // The caller should populate all of these fields (i.e. for empty headers, + // pipe an empty md into context). + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errors.New("missing metadata in incoming context") + } + // ":method can be hard-coded to POST if unavailable" - A41 + md[":method"] = []string{"POST"} + // "If the transport exposes TE in Metadata, then RBAC must special-case the + // header to treat it as not present." - A41 + delete(md, "TE") + + pi, ok := peer.FromContext(ctx) + if !ok { + return nil, errors.New("missing peer info in incoming context") + } + + // The methodName will be available in the passed in ctx from a unary or streaming + // interceptor, as grpc.Server pipes in a transport stream which contains the methodName + // into contexts available in both unary or streaming interceptors. + mn, ok := grpc.Method(ctx) + if !ok { + return nil, errors.New("missing method in incoming context") + } + + // The connection is needed in order to find the destination address and + // port of the incoming RPC Call. 
+ conn := getConnection(ctx) + if conn == nil { + return nil, errors.New("missing connection in incoming context") + } + _, dPort, err := net.SplitHostPort(conn.LocalAddr().String()) + if err != nil { + return nil, fmt.Errorf("error parsing local address: %v", err) + } + dp, err := strconv.ParseUint(dPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("error parsing local address: %v", err) + } + + var authType string + var peerCertificates []*x509.Certificate + if pi.AuthInfo != nil { + tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo) + if ok { + authType = pi.AuthInfo.AuthType() + peerCertificates = tlsInfo.State.PeerCertificates + } + } + + return &rpcData{ + md: md, + peerInfo: pi, + fullMethod: mn, + destinationPort: uint32(dp), + localAddr: conn.LocalAddr(), + authType: authType, + certs: peerCertificates, + }, nil +} + +// rpcData wraps data pulled from an incoming RPC that the RBAC engine needs to +// find a matching policy. +type rpcData struct { + // md is the HTTP Headers that are present in the incoming RPC. + md metadata.MD + // peerInfo is information about the downstream peer. + peerInfo *peer.Peer + // fullMethod is the method name being called on the upstream service. + fullMethod string + // destinationPort is the port that the RPC is being sent to on the + // server. + destinationPort uint32 + // localAddr is the address that the RPC is being sent to. + localAddr net.Addr + // authType is the type of authentication e.g. "tls". + authType string + // certs are the certificates presented by the peer during a TLS + // handshake. + certs []*x509.Certificate +} + +func (e *engine) doAuditLogging(rpcData *rpcData, rule string, authorized bool) { + // In the RBAC world, we need to have a SPIFFE ID as the principal for this + // to be meaningful + principal := "" + if rpcData.peerInfo != nil && rpcData.peerInfo.AuthInfo != nil && rpcData.peerInfo.AuthInfo.AuthType() == "tls" { + // If AuthType = tls, then we can cast AuthInfo to TLSInfo. 
+ tlsInfo := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo) + if tlsInfo.SPIFFEID != nil { + principal = tlsInfo.SPIFFEID.String() + } + } + + //TODO(gtcooke94) check if we need to log before creating the event + event := &audit.Event{ + FullMethodName: rpcData.fullMethod, + Principal: principal, + PolicyName: e.policyName, + MatchedRule: rule, + Authorized: authorized, + } + for _, logger := range e.auditLoggers { + switch e.auditCondition { + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY: + if !authorized { + logger.Log(event) + } + case v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW: + if authorized { + logger.Log(event) + } + case v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW: + logger.Log(event) + } + } +} + +// This is used when converting a custom config from raw JSON to a TypedStruct. +// The TypeURL of the TypeStruct will be "grpc.authz.audit_logging/". +const typeURLPrefix = "grpc.authz.audit_logging/" diff --git a/internal/xds/rbac/rbac_engine_test.go b/internal/xds/rbac/rbac_engine_test.go new file mode 100644 index 000000000000..94464cf184ab --- /dev/null +++ b/internal/xds/rbac/rbac_engine_test.go @@ -0,0 +1,1930 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package rbac + +import ( + "context" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "fmt" + "net" + "net/url" + "reflect" + "testing" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/grpc" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +type addr struct { + ipAddress string +} + +func (addr) Network() string { return "" } +func (a *addr) String() string { return a.ipAddress } + +// TestNewChainEngine tests the construction of the ChainEngine. Due to some +// types of RBAC configuration being logically wrong and returning an error +// rather than successfully constructing the RBAC Engine, this test tests both +// RBAC Configurations deemed successful and also RBAC Configurations that will +// raise errors. 
+func (s) TestNewChainEngine(t *testing.T) { + tests := []struct { + name string + policies []*v3rbacpb.RBAC + wantErr bool + policyName string + }{ + { + name: "SuccessCaseAnyMatchSingular", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + }, + { + name: "SuccessCaseAnyMatchMultiple", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + }, + { + name: "SuccessCaseSimplePolicySingular", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 8080}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + }, + // SuccessCaseSimplePolicyTwoPolicies tests the construction of the + // chained engines in the case where there are two policies in a list, + // one with an allow policy and one 
with a deny policy. A situation + // where two policies (allow and deny) is a very common use case for + // this API, and should successfully build. + { + name: "SuccessCaseSimplePolicyTwoPolicies", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 8080}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + { + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 8080}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + }, + { + name: "SuccessCaseEnvoyExampleSingular", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "service-admin": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/admin"}}}}}, + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: 
&v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "cluster.local/ns/default/sa/superuser"}}}}}, + }, + }, + "product-viewer": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/products"}}}}}}, + {Rule: &v3rbacpb.Permission_OrRules{OrRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 80}}, + {Rule: &v3rbacpb.Permission_DestinationPort{DestinationPort: 443}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + }, + { + name: "SourceIpMatcherSuccessSingular", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + }, + }, + }, + { + name: "SourceIpMatcherFailureSingular", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: 
&v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "DestinationIpMatcherSuccess", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "certain-destination-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + }, + { + name: "DestinationIpMatcherFailure", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "certain-destination-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "MatcherToNotPolicy", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "not-secret-content": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_NotRule{NotRule: &v3rbacpb.Permission{Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/secret-content"}}}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + }, + { + name: "MatcherToNotPrinicipal", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "not-from-certain-ip": { + 
Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_NotId{NotId: &v3rbacpb.Principal{Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}}}, + }, + }, + }, + }, + }, + }, + // PrinicpalProductViewer tests the construction of a chained engine + // with a policy that allows any downstream to send a GET request on a + // certain path. + { + name: "PrincipalProductViewer", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "product-viewer": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + { + Identifier: &v3rbacpb.Principal_AndIds{AndIds: &v3rbacpb.Principal_Set{Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{ + Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/books"}}}}}}, + {Identifier: &v3rbacpb.Principal_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/cars"}}}}}}, + }, + }}}, + }}}, + }, + }, + }, + }, + }, + }, + }, + // Certain Headers tests the construction of a chained engine with a + // policy that allows any downstream to send an HTTP request with + // certain headers. 
+ { + name: "CertainHeaders", + policies: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-headers": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + { + Identifier: &v3rbacpb.Principal_OrIds{OrIds: &v3rbacpb.Principal_Set{Ids: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: "GET"}}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_RangeMatch{RangeMatch: &v3typepb.Int64Range{ + Start: 0, + End: 64, + }}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PresentMatch{PresentMatch: true}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SuffixMatch{SuffixMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ContainsMatch{ContainsMatch: "GET"}}}}, + {Identifier: &v3rbacpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ContainsMatch{ContainsMatch: "GET"}}}}, + }}}, + }, + }, + }, + }, + }, + }, + }, + { + name: "LogAction", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_LOG, + Policies: 
map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "ActionNotSpecified", + policies: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + }, + { + name: "SimpleAuditLogger", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "SimpleAuditLogger_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + }, + }, + { + name: "AuditLoggerCustomConfig", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: 
"TestAuditLoggerCustomConfig", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{"abc": 123, "xyz": "123"}, "AuditLoggerCustomConfig_TestAuditLoggerCustomConfig")}, + IsOptional: false, + }, + }, + }, + }, + }, + policyName: "test_policy", + }, + { + name: "AuditLoggerCustomConfigXdsTypedStruct", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + TypedConfig: createXDSTypedStruct(t, map[string]interface{}{"abc": 123, "xyz": "123"}, "AuditLoggerCustomConfigXdsTypedStruct_TestAuditLoggerCustomConfig")}, + IsOptional: false, + }, + }, + }, + }, + }, + policyName: "test_policy", + }, + { + name: "Missing Optional AuditLogger doesn't fail", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "UnsupportedLogger", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "Missing Optional AuditLogger doesn't fail_UnsupportedLogger")}, + IsOptional: true, + }, + }, + }, + }, + }, + }, + { + name: "Missing 
Non-Optional AuditLogger fails", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "UnsupportedLogger", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "Missing Non-Optional AuditLogger fails_UnsupportedLogger")}, + IsOptional: false, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Cannot_parse_missing_CustomConfig", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + }, + IsOptional: false, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Cannot_parse_bad_CustomConfig", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: 
v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{"abc": "BADVALUE", "xyz": "123"}, "Cannot_parse_bad_CustomConfig_TestAuditLoggerCustomConfig")}, + IsOptional: false, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Cannot_parse_missing_typedConfig_name", + policies: []*v3rbacpb.RBAC{ + { + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerCustomConfig", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{"abc": 123, "xyz": "123"}, "")}, + IsOptional: false, + }, + }, + }, + }, + }, + wantErr: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b := TestAuditLoggerBufferBuilder{testName: test.name} + audit.RegisterLoggerBuilder(&b) + b2 := TestAuditLoggerCustomConfigBuilder{testName: test.name} + audit.RegisterLoggerBuilder(&b2) + if _, err := NewChainEngine(test.policies, test.policyName); (err != nil) != test.wantErr { + t.Fatalf("NewChainEngine(%+v) returned err: %v, wantErr: %v", test.policies, err, test.wantErr) + } + }) + } +} + +type rbacQuery struct { + rpcData *rpcData + wantStatusCode codes.Code + wantAuditEvents []*audit.Event +} + +// TestChainEngine tests the chain of RBAC Engines by configuring the chain of +// engines in a certain way in different scenarios. 
After configuring the chain +// of engines in a certain way, this test pings the chain of engines with +// different types of data representing incoming RPC's (piped into a context), +// and verifies that it works as expected. +func (s) TestChainEngine(t *testing.T) { + defer func(gc func(ctx context.Context) net.Conn) { + getConnection = gc + }(getConnection) + tests := []struct { + name string + rbacConfigs []*v3rbacpb.RBAC + rbacQueries []rbacQuery + policyName string + }{ + // SuccessCaseAnyMatch tests a single RBAC Engine instantiated with + // a config with a policy with any rules for both permissions and + // principals, meaning that any data about incoming RPC's that the RBAC + // Engine is queried with should match that policy. + { + name: "SuccessCaseAnyMatch", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + { + rpcData: &rpcData{ + fullMethod: "some method", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + }, + }, + // SuccessCaseSimplePolicy is a test that tests a single policy + // that only allows an rpc to proceed if the rpc is calling with a certain + // path. 
+ { + name: "SuccessCaseSimplePolicy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This RPC should match with the local host fan policy. Thus, + // this RPC should be allowed to proceed. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + + // This RPC shouldn't match with the local host fan policy. Thus, + // this rpc shouldn't be allowed to proceed. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + // SuccessCaseEnvoyExample is a test based on the example provided + // in the EnvoyProxy docs. The RBAC Config contains two policies, + // service admin and product viewer, that provides an example of a real + // RBAC Config that might be configured for a given for a given backend + // service. 
+ { + name: "SuccessCaseEnvoyExample", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "service-admin": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "//cluster.local/ns/default/sa/admin"}}}}}, + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "//cluster.local/ns/default/sa/superuser"}}}}}, + }, + }, + "product-viewer": { + Permissions: []*v3rbacpb.Permission{ + { + Rule: &v3rbacpb.Permission_AndRules{AndRules: &v3rbacpb.Permission_Set{ + Rules: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "GET"}}}}, + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/products"}}}}}}, + }, + }, + }, + }, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This incoming RPC Call should match with the service admin + // policy. + { + rpcData: &rpcData{ + fullMethod: "some method", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{ + { + URIs: []*url.URL{ + { + Host: "cluster.local", + Path: "/ns/default/sa/admin", + }, + }, + }, + }, + }, + }, + }, + }, + wantStatusCode: codes.OK, + }, + // These incoming RPC calls should not match any policy. 
+ { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + { + rpcData: &rpcData{ + fullMethod: "get-product-list", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{ + { + Subject: pkix.Name{ + CommonName: "localhost", + }, + }, + }, + }, + }, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + { + name: "NotMatcher", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "not-secret-content": { + Permissions: []*v3rbacpb.Permission{ + { + Rule: &v3rbacpb.Permission_NotRule{ + NotRule: &v3rbacpb.Permission{Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "/secret-content"}}}}}}, + }, + }, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This incoming RPC Call should match with the not-secret-content policy. + { + rpcData: &rpcData{ + fullMethod: "/regular-content", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + // This incoming RPC Call shouldn't match with the not-secret-content-policy. 
+ { + rpcData: &rpcData{ + fullMethod: "/secret-content", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + { + name: "DirectRemoteIpMatcher", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-direct-remote-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This incoming RPC Call should match with the certain-direct-remote-ip policy. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + // This incoming RPC Call shouldn't match with the certain-direct-remote-ip policy. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + // This test tests a RBAC policy configured with a remote-ip policy. + // This should be logically equivalent to configuring a Engine with a + // direct-remote-ip policy, as per A41 - "allow equating RBAC's + // direct_remote_ip and remote_ip." + { + name: "RemoteIpMatcher", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-remote-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_RemoteIp{RemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This incoming RPC Call should match with the certain-remote-ip policy. 
+ { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + // This incoming RPC Call shouldn't match with the certain-remote-ip policy. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + { + name: "DestinationIpMatcher", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-destination-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This incoming RPC Call shouldn't match with the + // certain-destination-ip policy, as the test listens on local + // host. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + // AllowAndDenyPolicy tests a policy with an allow (on path) and + // deny (on port) policy chained together. This represents how a user + // configured interceptor would use this, and also is a potential + // configuration for a dynamic xds interceptor. 
+ { + name: "AllowAndDenyPolicy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + Action: v3rbacpb.RBAC_ALLOW, + }, + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + Action: v3rbacpb.RBAC_DENY, + }, + }, + rbacQueries: []rbacQuery{ + // This RPC should match with the allow policy, and shouldn't + // match with the deny and thus should be allowed to proceed. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + // This RPC should match with both the allow policy and deny policy + // and thus shouldn't be allowed to proceed as matched with deny. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with either policy, and thus + // shouldn't be allowed to proceed as didn't match with allow. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with allow, match with deny, and + // thus shouldn't be allowed to proceed. 
+ { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + // This test tests that when there are no SANs or Subject's + // distinguished name in incoming RPC's, that authenticated matchers + // match against the empty string. + { + name: "default-matching-no-credentials", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "service-admin": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Authenticated_{Authenticated: &v3rbacpb.Principal_Authenticated{PrincipalName: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: ""}}}}}, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This incoming RPC Call should match with the service admin + // policy. No authentication info is provided, so the + // authenticated matcher should match to the string matcher on + // the empty string, matching to the service-admin policy. + { + rpcData: &rpcData{ + fullMethod: "some method", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{ + { + URIs: []*url.URL{ + { + Host: "cluster.local", + Path: "/ns/default/sa/admin", + }, + }, + }, + }, + }, + }, + }, + }, + wantStatusCode: codes.OK, + }, + }, + }, + // This test tests that an RBAC policy configured with a metadata + // matcher as a permission doesn't match with any incoming RPC. 
+ { + name: "metadata-never-matches", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "metadata-never-matches": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Metadata{ + Metadata: &v3matcherpb.MetadataMatcher{}, + }}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + { + rpcData: &rpcData{ + fullMethod: "some method", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + // This test tests that an RBAC policy configured with a metadata + // matcher with invert set to true as a permission always matches with + // any incoming RPC. + { + name: "metadata-invert-always-matches", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "metadata-invert-always-matches": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Metadata{ + Metadata: &v3matcherpb.MetadataMatcher{Invert: true}, + }}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + { + rpcData: &rpcData{ + fullMethod: "some method", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + }, + }, + // AllowAndDenyPolicy tests a policy with an allow (on path) and + // deny (on port) policy chained together. This represents how a user + // configured interceptor would use this, and also is a potential + // configuration for a dynamic xds interceptor. Further, it tests that + // the audit logger works properly in each scenario. 
+ { + name: "AuditLoggingAllowAndDenyPolicy_ON_ALLOW", + policyName: "test_policy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + Action: v3rbacpb.RBAC_DENY, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_ALLOW_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + Action: v3rbacpb.RBAC_ALLOW, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_ALLOW_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This RPC should match with the allow policy, and shouldn't + // match 
with the deny and thus should be allowed to proceed. + { + rpcData: &rpcData{ + fullMethod: "", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{ + { + URIs: []*url.URL{ + { + Scheme: "spiffe", + Host: "cluster.local", + Path: "/ns/default/sa/admin", + }, + }, + }, + }, + }, + SPIFFEID: &url.URL{ + Scheme: "spiffe", + Host: "cluster.local", + Path: "/ns/default/sa/admin", + }, + }, + }, + }, + wantStatusCode: codes.OK, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "", + Principal: "spiffe://cluster.local/ns/default/sa/admin", + PolicyName: "test_policy", + MatchedRule: "certain-source-ip", + Authorized: true, + }, + }, + }, + // This RPC should match with both the allow policy and deny policy + // and thus shouldn't be allowed to proceed as matched with deny. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with either policy, and thus + // shouldn't be allowed to proceed as didn't match with allow. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with allow, match with deny, and + // thus shouldn't be allowed to proceed. 
+ { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + { + name: "AuditLoggingAllowAndDenyPolicy_ON_DENY", + policyName: "test_policy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + Action: v3rbacpb.RBAC_DENY, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_DENY_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + Action: v3rbacpb.RBAC_ALLOW, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, 
"AuditLoggingAllowAndDenyPolicy_ON_DENY_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This RPC should match with the allow policy, and shouldn't + // match with the deny and thus should be allowed to proceed. + // Audit logging matches with nothing. + { + rpcData: &rpcData{ + fullMethod: "", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + // This RPC should match with both the allow policy and deny policy + // and thus shouldn't be allowed to proceed as matched with deny. + // Audit logging matches with deny and short circuits. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + AuthInfo: credentials.TLSInfo{ + State: tls.ConnectionState{ + PeerCertificates: []*x509.Certificate{ + { + URIs: []*url.URL{ + { + Host: "cluster.local", + Path: "/ns/default/sa/admin", + }, + }, + }, + }, + }, + }, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "localhost-fan-page", + PolicyName: "test_policy", + MatchedRule: "localhost-fan", + Authorized: false, + }, + }, + }, + // This RPC shouldn't match with either policy, and thus + // shouldn't be allowed to proceed as didn't match with allow. + // Audit logging matches with the allow policy. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "", + PolicyName: "test_policy", + MatchedRule: "", + Authorized: false, + }, + }, + }, + // This RPC shouldn't match with allow, match with deny, and + // thus shouldn't be allowed to proceed. + // Audit logging will have the deny logged. 
+ { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "localhost-fan-page", + PolicyName: "test_policy", + MatchedRule: "localhost-fan", + Authorized: false, + }, + }, + }, + }, + }, + { + name: "AuditLoggingAllowAndDenyPolicy_NONE", + policyName: "test_policy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + Action: v3rbacpb.RBAC_DENY, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_NONE_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + Action: v3rbacpb.RBAC_ALLOW, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_NONE, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + 
{AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_NONE_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This RPC should match with the allow policy, and shouldn't + // match with the deny and thus should be allowed to proceed. + // Audit logging is NONE. + { + rpcData: &rpcData{ + fullMethod: "", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + }, + // This RPC should match with both the allow policy and deny policy + // and thus shouldn't be allowed to proceed as matched with deny. + // Audit logging is NONE. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with either policy, and thus + // shouldn't be allowed to proceed as didn't match with allow. + // Audit logging is NONE. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + // This RPC shouldn't match with allow, match with deny, and + // thus shouldn't be allowed to proceed. + // Audit logging is NONE. 
+ { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + }, + }, + }, + { + name: "AuditLoggingAllowAndDenyPolicy_ON_DENY_AND_ALLOW", + policyName: "test_policy", + rbacConfigs: []*v3rbacpb.RBAC{ + { + Policies: map[string]*v3rbacpb.Policy{ + "localhost-fan": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "localhost-fan-page"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + Action: v3rbacpb.RBAC_DENY, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_DENY_AND_ALLOW_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + { + Policies: map[string]*v3rbacpb.Policy{ + "certain-source-ip": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: &v3corepb.CidrRange{AddressPrefix: "0.0.0.0", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + }, + }, + Action: v3rbacpb.RBAC_ALLOW, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_DENY_AND_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + {AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "TestAuditLoggerBuffer", + TypedConfig: createUDPATypedStruct(t, 
map[string]interface{}{}, "AuditLoggingAllowAndDenyPolicy_ON_DENY_AND_ALLOW_TestAuditLoggerBuffer")}, + IsOptional: false, + }, + }, + }, + }, + }, + rbacQueries: []rbacQuery{ + // This RPC should match with the allow policy, and shouldn't + // match with the deny and thus should be allowed to proceed. + // Audit logging matches with nothing. + { + rpcData: &rpcData{ + fullMethod: "", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.OK, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "", + PolicyName: "test_policy", + MatchedRule: "certain-source-ip", + Authorized: true, + }, + }, + }, + // This RPC should match with both the allow policy and deny policy + // and thus shouldn't be allowed to proceed as matched with deny. + // Audit logging matches with deny and short circuits. + { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "0.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "localhost-fan-page", + PolicyName: "test_policy", + MatchedRule: "localhost-fan", + Authorized: false, + }, + }, + }, + // This RPC shouldn't match with either policy, and thus + // shouldn't be allowed to proceed as didn't match with allow. + // Audit logging matches with the allow policy. + { + rpcData: &rpcData{ + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "", + PolicyName: "test_policy", + MatchedRule: "", + Authorized: false, + }, + }, + }, + // This RPC shouldn't match with allow, match with deny, and + // thus shouldn't be allowed to proceed. + // Audit logging will have the deny logged. 
+ { + rpcData: &rpcData{ + fullMethod: "localhost-fan-page", + peerInfo: &peer.Peer{ + Addr: &addr{ipAddress: "10.0.0.0"}, + }, + }, + wantStatusCode: codes.PermissionDenied, + wantAuditEvents: []*audit.Event{ + { + FullMethodName: "localhost-fan-page", + PolicyName: "test_policy", + MatchedRule: "localhost-fan", + Authorized: false, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b := TestAuditLoggerBufferBuilder{testName: test.name} + audit.RegisterLoggerBuilder(&b) + b2 := TestAuditLoggerCustomConfigBuilder{testName: test.name} + audit.RegisterLoggerBuilder(&b2) + + // Instantiate the chainedRBACEngine with different configurations that are + // interesting to test and to query. + cre, err := NewChainEngine(test.rbacConfigs, test.policyName) + if err != nil { + t.Fatalf("Error constructing RBAC Engine: %v", err) + } + // Query the created chain of RBAC Engines with different args to see + // if the chain of RBAC Engines configured as such works as intended. + for _, data := range test.rbacQueries { + func() { + // Construct the context with three data points that have enough + // information to represent incoming RPC's. This will be how a + // user uses this API. A user will have to put MD, PeerInfo, and + // the connection the RPC is sent on in the context. + ctx := metadata.NewIncomingContext(context.Background(), data.rpcData.md) + + // Make a TCP connection with a certain destination port. The + // address/port of this connection will be used to populate the + // destination ip/port in RPCData struct. This represents what + // the user of ChainEngine will have to place into context, + // as this is only way to get destination ip and port. 
+ lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Error listening: %v", err) + } + defer lis.Close() + connCh := make(chan net.Conn, 1) + go func() { + conn, err := lis.Accept() + if err != nil { + t.Errorf("Error accepting connection: %v", err) + return + } + connCh <- conn + }() + _, err = net.Dial("tcp", lis.Addr().String()) + if err != nil { + t.Fatalf("Error dialing: %v", err) + } + conn := <-connCh + defer conn.Close() + getConnection = func(context.Context) net.Conn { + return conn + } + ctx = peer.NewContext(ctx, data.rpcData.peerInfo) + stream := &ServerTransportStreamWithMethod{ + method: data.rpcData.fullMethod, + } + + ctx = grpc.NewContextWithServerTransportStream(ctx, stream) + err = cre.IsAuthorized(ctx) + if gotCode := status.Code(err); gotCode != data.wantStatusCode { + t.Fatalf("IsAuthorized(%+v, %+v) returned (%+v), want(%+v)", ctx, data.rpcData.fullMethod, gotCode, data.wantStatusCode) + } + if !reflect.DeepEqual(b.auditEvents, data.wantAuditEvents) { + t.Fatalf("Unexpected audit event for query:%v", data) + } + + // This builder's auditEvents can be shared for several queries, make sure it's empty. + b.auditEvents = nil + }() + } + }) + } +} + +type ServerTransportStreamWithMethod struct { + method string +} + +func (sts *ServerTransportStreamWithMethod) Method() string { + return sts.method +} + +func (sts *ServerTransportStreamWithMethod) SetHeader(md metadata.MD) error { + return nil +} + +func (sts *ServerTransportStreamWithMethod) SendHeader(md metadata.MD) error { + return nil +} + +func (sts *ServerTransportStreamWithMethod) SetTrailer(md metadata.MD) error { + return nil +} + +// An audit logger that will log to the auditEvents slice. +type TestAuditLoggerBuffer struct { + auditEvents *[]*audit.Event +} + +func (logger *TestAuditLoggerBuffer) Log(e *audit.Event) { + *(logger.auditEvents) = append(*(logger.auditEvents), e) +} + +// Builds TestAuditLoggerBuffer. 
+type TestAuditLoggerBufferBuilder struct { + auditEvents []*audit.Event + testName string +} + +// The required config for TestAuditLoggerBuffer. +type TestAuditLoggerBufferConfig struct { + audit.LoggerConfig +} + +func (b *TestAuditLoggerBufferBuilder) ParseLoggerConfig(configJSON json.RawMessage) (config audit.LoggerConfig, err error) { + return TestAuditLoggerBufferConfig{}, nil +} + +func (b *TestAuditLoggerBufferBuilder) Build(config audit.LoggerConfig) audit.Logger { + return &TestAuditLoggerBuffer{auditEvents: &b.auditEvents} +} + +func (b *TestAuditLoggerBufferBuilder) Name() string { + return b.testName + "_TestAuditLoggerBuffer" +} + +// An audit logger to test using a custom config. +type TestAuditLoggerCustomConfig struct{} + +func (logger *TestAuditLoggerCustomConfig) Log(*audit.Event) {} + +// Build TestAuditLoggerCustomConfig. This builds a TestAuditLoggerCustomConfig +// logger that uses a custom config. +type TestAuditLoggerCustomConfigBuilder struct { + testName string +} + +// The custom config for the TestAuditLoggerCustomConfig logger. +type TestAuditLoggerCustomConfigConfig struct { + audit.LoggerConfig + Abc int + Xyz string +} + +// Parses TestAuditLoggerCustomConfigConfig. Hard-coded to match with it's test +// case above. +func (b TestAuditLoggerCustomConfigBuilder) ParseLoggerConfig(configJSON json.RawMessage) (audit.LoggerConfig, error) { + c := TestAuditLoggerCustomConfigConfig{} + err := json.Unmarshal(configJSON, &c) + if err != nil { + return nil, fmt.Errorf("could not parse custom config: %v", err) + } + return c, nil +} + +func (b *TestAuditLoggerCustomConfigBuilder) Build(config audit.LoggerConfig) audit.Logger { + return &TestAuditLoggerCustomConfig{} +} + +func (b *TestAuditLoggerCustomConfigBuilder) Name() string { + return b.testName + "_TestAuditLoggerCustomConfig" +} + +// Builds custom configs for audit logger RBAC protos. 
+func createUDPATypedStruct(t *testing.T, in map[string]interface{}, name string) *anypb.Any { + t.Helper() + pb, err := structpb.NewStruct(in) + if err != nil { + t.Fatalf("createUDPATypedStructFailed during structpb.NewStruct: %v", err) + } + typedURL := "" + if name != "" { + typedURL = typeURLPrefix + name + } + typedStruct := &v1xdsudpatypepb.TypedStruct{ + TypeUrl: typedURL, + Value: pb, + } + customConfig, err := anypb.New(typedStruct) + if err != nil { + t.Fatalf("createUDPATypedStructFailed during anypb.New: %v", err) + } + return customConfig +} + +// Builds custom configs for audit logger RBAC protos. +func createXDSTypedStruct(t *testing.T, in map[string]interface{}, name string) *anypb.Any { + t.Helper() + pb, err := structpb.NewStruct(in) + if err != nil { + t.Fatalf("createXDSTypedStructFailed during structpb.NewStruct: %v", err) + } + typedStruct := &v3xdsxdstypepb.TypedStruct{ + TypeUrl: typeURLPrefix + name, + Value: pb, + } + customConfig, err := anypb.New(typedStruct) + if err != nil { + t.Fatalf("createXDSTypedStructFailed during anypb.New: %v", err) + } + return customConfig +} diff --git a/internal/xds_handshake_cluster.go b/internal/xds_handshake_cluster.go new file mode 100644 index 000000000000..e8b492774d1a --- /dev/null +++ b/internal/xds_handshake_cluster.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package internal + +import ( + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/resolver" +) + +// handshakeClusterNameKey is the type used as the key to store cluster name in +// the Attributes field of resolver.Address. +type handshakeClusterNameKey struct{} + +// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field +// is updated with the cluster name. +func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) + return addr +} + +// GetXDSHandshakeClusterName returns cluster name stored in attr. +func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/interop/client/client.go b/interop/client/client.go index 975e0b5d2f3c..8238e0a106d7 100644 --- a/interop/client/client.go +++ b/interop/client/client.go @@ -17,24 +17,39 @@ */ // Binary client is an interop client. +// +// See interop test case descriptions [here]. +// +// [here]: https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md package main import ( + "context" + "crypto/tls" + "crypto/x509" "flag" "net" + "os" "strconv" + "strings" + "time" + "golang.org/x/oauth2" "google.golang.org/grpc" - _ "google.golang.org/grpc/balancer/grpclb" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/credentials/google" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/oauth" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/testdata" - _ "google.golang.org/grpc/xds/googledirectpath" + + _ "google.golang.org/grpc/balancer/grpclb" // Register the grpclb load balancing policy. 
+ _ "google.golang.org/grpc/balancer/rls" // Register the RLS load balancing policy. + _ "google.golang.org/grpc/xds/googledirectpath" // Register xDS resolver required for c2p directpath. testgrpc "google.golang.org/grpc/interop/grpc_testing" ) @@ -45,20 +60,26 @@ const ( ) var ( - caFile = flag.String("ca_file", "", "The file containning the CA root cert file") - useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true") - useALTS = flag.Bool("use_alts", false, "Connection uses ALTS if true (this option can only be used on GCP)") - customCredentialsType = flag.String("custom_credentials_type", "", "Custom creds to use, excluding TLS or ALTS") - altsHSAddr = flag.String("alts_handshaker_service_address", "", "ALTS handshaker gRPC service address") - testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") - serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file") - oauthScope = flag.String("oauth_scope", "", "The scope for OAuth2 tokens") - defaultServiceAccount = flag.String("default_service_account", "", "Email of GCE default service account") - serverHost = flag.String("server_host", "localhost", "The server host name") - serverPort = flag.Int("server_port", 10000, "The server port number") - serviceConfigJSON = flag.String("service_config_json", "", "Disables service config lookups and sets the provided string as the default service config.") - tlsServerName = flag.String("server_host_override", "", "The server name use to verify the hostname returned by TLS handshake if it is not empty. 
Otherwise, --server_host is used.") - testCase = flag.String("test_case", "large_unary", + caFile = flag.String("ca_file", "", "The file containning the CA root cert file") + useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true") + useALTS = flag.Bool("use_alts", false, "Connection uses ALTS if true (this option can only be used on GCP)") + customCredentialsType = flag.String("custom_credentials_type", "", "Custom creds to use, excluding TLS or ALTS") + altsHSAddr = flag.String("alts_handshaker_service_address", "", "ALTS handshaker gRPC service address") + testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") + serviceAccountKeyFile = flag.String("service_account_key_file", "", "Path to service account json key file") + oauthScope = flag.String("oauth_scope", "", "The scope for OAuth2 tokens") + defaultServiceAccount = flag.String("default_service_account", "", "Email of GCE default service account") + serverHost = flag.String("server_host", "localhost", "The server host name") + serverPort = flag.Int("server_port", 10000, "The server port number") + serviceConfigJSON = flag.String("service_config_json", "", "Disables service config lookups and sets the provided string as the default service config.") + soakIterations = flag.Int("soak_iterations", 10, "The number of iterations to use for the two soak tests: rpc_soak and channel_soak") + soakMaxFailures = flag.Int("soak_max_failures", 0, "The number of iterations in soak tests that are allowed to fail (either due to non-OK status code or exceeding the per-iteration max acceptable latency).") + soakPerIterationMaxAcceptableLatencyMs = flag.Int("soak_per_iteration_max_acceptable_latency_ms", 1000, "The number of milliseconds a single iteration in the two soak tests (rpc_soak and channel_soak) should take.") + soakOverallTimeoutSeconds = flag.Int("soak_overall_timeout_seconds", 10, "The overall number of seconds after which a soak test should stop and 
fail, if the desired number of iterations have not yet completed.") + soakMinTimeMsBetweenRPCs = flag.Int("soak_min_time_ms_between_rpcs", 0, "The minimum time in milliseconds between consecutive RPCs in a soak test (rpc_soak or channel_soak), useful for limiting QPS") + tlsServerName = flag.String("server_host_override", "", "The server name used to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") + additionalMetadata = flag.String("additional_metadata", "", "Additional metadata to send in each request, as a semicolon-separated list of key:value pairs.") + testCase = flag.String("test_case", "large_unary", `Configure different test cases. Valid options are: empty_unary : empty (zero bytes) request and response; large_unary : single request and (large) response; @@ -81,7 +102,9 @@ var ( custom_metadata: server will echo custom metadata; unimplemented_method: client attempts to call unimplemented method; unimplemented_service: client attempts to call unimplemented service; - pick_first_unary: all requests are sent to one server despite multiple servers are resolved.`) + pick_first_unary: all requests are sent to one server despite multiple servers are resolved; + orca_per_rpc: the client verifies ORCA per-RPC metrics are provided; + orca_oob: the client verifies ORCA out-of-band metrics are provided.`) logger = grpclog.Component("interop") ) @@ -96,8 +119,37 @@ const ( credsComputeEngineCreds ) +// Parses the --additional_metadata flag and returns metadata to send on each RPC, +// formatted as per https://pkg.go.dev/google.golang.org/grpc/metadata#Pairs. +// Allow any character but semicolons in values. If the flag is empty, return a nil map. 
+func parseAdditionalMetadataFlag() []string { + if len(*additionalMetadata) == 0 { + return nil + } + r := *additionalMetadata + addMd := make([]string, 0) + for len(r) > 0 { + i := strings.Index(r, ":") + if i < 0 { + logger.Fatalf("Error parsing --additional_metadata flag: missing colon separator") + } + addMd = append(addMd, r[:i]) // append key + r = r[i+1:] + i = strings.Index(r, ";") + // append value + if i < 0 { + addMd = append(addMd, r) + break + } + addMd = append(addMd, r[:i]) + r = r[i+1:] + } + return addMd +} + func main() { flag.Parse() + logger.Infof("Client running with test case %q", *testCase) var useGDC bool // use google default creds var useCEC bool // use compute engine creds if *customCredentialsType != "" { @@ -135,22 +187,25 @@ func main() { var opts []grpc.DialOption switch credsChosen { case credsTLS: - var sn string - if *tlsServerName != "" { - sn = *tlsServerName - } - var creds credentials.TransportCredentials + var roots *x509.CertPool if *testCA { - var err error if *caFile == "" { *caFile = testdata.Path("ca.pem") } - creds, err = credentials.NewClientTLSFromFile(*caFile, sn) + b, err := os.ReadFile(*caFile) if err != nil { - logger.Fatalf("Failed to create TLS credentials %v", err) + logger.Fatalf("Failed to read root certificate file %q: %v", *caFile, err) + } + roots = x509.NewCertPool() + if !roots.AppendCertsFromPEM(b) { + logger.Fatalf("Failed to append certificates: %s", string(b)) } + } + var creds credentials.TransportCredentials + if *tlsServerName != "" { + creds = credentials.NewClientTLSFromCert(roots, *tlsServerName) } else { - creds = credentials.NewClientTLSFromCert(nil, sn) + creds = credentials.NewTLS(&tls.Config{RootCAs: roots}) } opts = append(opts, grpc.WithTransportCredentials(creds)) case credsALTS: @@ -165,7 +220,7 @@ func main() { case credsComputeEngineCreds: opts = append(opts, grpc.WithCredentialsBundle(google.NewComputeEngineCredentials())) case credsNone: - opts = append(opts, grpc.WithInsecure()) + 
opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) default: logger.Fatal("Invalid creds") } @@ -185,13 +240,23 @@ func main() { } opts = append(opts, grpc.WithPerRPCCredentials(jwtCreds)) } else if *testCase == "oauth2_auth_token" { - opts = append(opts, grpc.WithPerRPCCredentials(oauth.NewOauthAccess(interop.GetToken(*serviceAccountKeyFile, *oauthScope)))) + opts = append(opts, grpc.WithPerRPCCredentials(oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(interop.GetToken(*serviceAccountKeyFile, *oauthScope))})) } } if len(*serviceConfigJSON) > 0 { opts = append(opts, grpc.WithDisableServiceConfig(), grpc.WithDefaultServiceConfig(*serviceConfigJSON)) } - opts = append(opts, grpc.WithBlock()) + if addMd := parseAdditionalMetadataFlag(); addMd != nil { + unaryAddMd := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + ctx = metadata.AppendToOutgoingContext(ctx, addMd...) + return invoker(ctx, method, req, reply, cc, opts...) + } + streamingAddMd := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + ctx = metadata.AppendToOutgoingContext(ctx, addMd...) + return streamer(ctx, desc, cc, method, opts...) + } + opts = append(opts, grpc.WithUnaryInterceptor(unaryAddMd), grpc.WithStreamInterceptor(streamingAddMd)) + } conn, err := grpc.Dial(serverAddr, opts...) 
if err != nil { logger.Fatalf("Fail to dial: %v", err) @@ -286,6 +351,18 @@ func main() { case "pick_first_unary": interop.DoPickFirstUnary(tc) logger.Infoln("PickFirstUnary done") + case "rpc_soak": + interop.DoSoakTest(tc, serverAddr, opts, false /* resetChannel */, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Duration(*soakMinTimeMsBetweenRPCs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) + logger.Infoln("RpcSoak done") + case "channel_soak": + interop.DoSoakTest(tc, serverAddr, opts, true /* resetChannel */, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Duration(*soakMinTimeMsBetweenRPCs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) + logger.Infoln("ChannelSoak done") + case "orca_per_rpc": + interop.DoORCAPerRPCTest(tc) + logger.Infoln("ORCAPerRPC done") + case "orca_oob": + interop.DoORCAOOBTest(tc) + logger.Infoln("ORCAOOB done") default: logger.Fatal("Unsupported test case: ", *testCase) } diff --git a/interop/fake_grpclb/fake_grpclb.go b/interop/fake_grpclb/fake_grpclb.go index 6804235486ba..00ae00a7f683 100644 --- a/interop/fake_grpclb/fake_grpclb.go +++ b/interop/fake_grpclb/fake_grpclb.go @@ -23,18 +23,13 @@ package main import ( "flag" - "net" - "strconv" "strings" - "time" "google.golang.org/grpc" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" - "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" + "google.golang.org/grpc/internal/testutils/fakegrpclb" "google.golang.org/grpc/testdata" ) @@ -49,76 +44,16 @@ var ( logger = grpclog.Component("interop") ) -type loadBalancerServer struct { - lbpb.UnimplementedLoadBalancerServer - serverListResponse *lbpb.LoadBalanceResponse -} - -func (l 
*loadBalancerServer) BalanceLoad(stream lbpb.LoadBalancer_BalanceLoadServer) error { - logger.Info("Begin handling new BalancerLoad request.") - var lbReq *lbpb.LoadBalanceRequest - var err error - if lbReq, err = stream.Recv(); err != nil { - logger.Errorf("Error receiving LoadBalanceRequest: %v", err) - return err - } - logger.Info("LoadBalancerRequest received.") - initialReq := lbReq.GetInitialRequest() - if initialReq == nil { - logger.Info("Expected first request to be an InitialRequest. Got: %v", lbReq) - return status.Error(codes.Unknown, "First request not an InitialRequest") - } - // gRPC clients targeting foo.bar.com:443 can sometimes include the ":443" suffix in - // their requested names; handle this case. TODO: make 443 configurable? - var cleanedName string - var requestedNamePortNumber string - if cleanedName, requestedNamePortNumber, err = net.SplitHostPort(initialReq.Name); err != nil { - cleanedName = initialReq.Name - } else { - if requestedNamePortNumber != "443" { - logger.Info("Bad requested service name port number: %v.", requestedNamePortNumber) - return status.Error(codes.Unknown, "Bad requested service name port number") - } - } - if cleanedName != *serviceName { - logger.Info("Expected requested service name: %v. 
Got: %v", *serviceName, initialReq.Name) - return status.Error(codes.NotFound, "Bad requested service name") - } - if err := stream.Send(&lbpb.LoadBalanceResponse{ - LoadBalanceResponseType: &lbpb.LoadBalanceResponse_InitialResponse{ - InitialResponse: &lbpb.InitialLoadBalanceResponse{}, - }, - }); err != nil { - logger.Errorf("Error sending initial LB response: %v", err) - return status.Error(codes.Unknown, "Error sending initial response") - } - logger.Info("Send LoadBalanceResponse: %v", l.serverListResponse) - if err := stream.Send(l.serverListResponse); err != nil { - logger.Errorf("Error sending LB response: %v", err) - return status.Error(codes.Unknown, "Error sending response") - } - if *shortStream { - return nil - } - for { - logger.Info("Send LoadBalanceResponse: %v", l.serverListResponse) - if err := stream.Send(l.serverListResponse); err != nil { - logger.Errorf("Error sending LB response: %v", err) - return status.Error(codes.Unknown, "Error sending response") - } - time.Sleep(10 * time.Second) - } -} - func main() { flag.Parse() + var opts []grpc.ServerOption if *useTLS { certFile := testdata.Path("server1.pem") keyFile := testdata.Path("server1.key") creds, err := credentials.NewServerTLSFromFile(certFile, keyFile) if err != nil { - logger.Fatalf("Failed to generate credentials %v", err) + logger.Fatalf("Failed to generate credentials: %v", err) } opts = append(opts, grpc.Creds(creds)) } else if *useALTS { @@ -126,47 +61,23 @@ func main() { altsTC := alts.NewServerCreds(altsOpts) opts = append(opts, grpc.Creds(altsTC)) } - var serverList []*lbpb.Server - if len(*backendAddrs) == 0 { - serverList = make([]*lbpb.Server, 0) - } else { - rawBackendAddrs := strings.Split(*backendAddrs, ",") - serverList = make([]*lbpb.Server, len(rawBackendAddrs)) - for i := range rawBackendAddrs { - rawIP, rawPort, err := net.SplitHostPort(rawBackendAddrs[i]) - if err != nil { - logger.Fatalf("Failed to parse --backend_addrs[%d]=%v, error: %v", i, rawBackendAddrs[i], 
err) - } - ip := net.ParseIP(rawIP) - if ip == nil { - logger.Fatalf("Failed to parse ip: %v", rawIP) - } - numericPort, err := strconv.Atoi(rawPort) - if err != nil { - logger.Fatalf("Failed to convert port %v to int", rawPort) - } - logger.Infof("Adding backend ip: %v, port: %d", ip.String(), numericPort) - serverList[i] = &lbpb.Server{ - IpAddress: ip, - Port: int32(numericPort), - } - } - } - serverListResponse := &lbpb.LoadBalanceResponse{ - LoadBalanceResponseType: &lbpb.LoadBalanceResponse_ServerList{ - ServerList: &lbpb.ServerList{ - Servers: serverList, - }, - }, - } - server := grpc.NewServer(opts...) - logger.Infof("Begin listening on %d.", *port) - lis, err := net.Listen("tcp", ":"+strconv.Itoa(*port)) + + rawBackendAddrs := strings.Split(*backendAddrs, ",") + server, err := fakegrpclb.NewServer(fakegrpclb.ServerParams{ + ListenPort: *port, + ServerOptions: opts, + LoadBalancedServiceName: *serviceName, + LoadBalancedServicePort: 443, // TODO: make this configurable? + BackendAddresses: rawBackendAddrs, + ShortStream: *shortStream, + }) if err != nil { - logger.Fatalf("Failed to listen on port %v: %v", *port, err) + logger.Fatalf("Failed to create balancer server: %v", err) + } + + // Serve() starts serving and blocks until Stop() is called. We don't need to + // call Stop() here since we want the server to run until we are killed. + if err := server.Serve(); err != nil { + logger.Fatalf("Failed to start balancer server: %v", err) } - lbpb.RegisterLoadBalancerServer(server, &loadBalancerServer{ - serverListResponse: serverListResponse, - }) - server.Serve(lis) } diff --git a/interop/grpc_testing/benchmark_service.pb.go b/interop/grpc_testing/benchmark_service.pb.go index a1057b8a7599..78d9e20c52b2 100644 --- a/interop/grpc_testing/benchmark_service.pb.go +++ b/interop/grpc_testing/benchmark_service.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/testing/benchmark_service.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - var File_grpc_testing_benchmark_service_proto protoreflect.FileDescriptor var file_grpc_testing_benchmark_service_proto_rawDesc = []byte{ @@ -75,8 +70,10 @@ var file_grpc_testing_benchmark_service_proto_rawDesc = []byte{ 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x2a, 0x0a, 0x0f, 0x69, 0x6f, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x15, 0x42, + 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_grpc_testing_benchmark_service_proto_goTypes = []interface{}{ diff --git a/interop/grpc_testing/benchmark_service_grpc.pb.go b/interop/grpc_testing/benchmark_service_grpc.pb.go index 1dcba4587d29..84cd44e4d45d 100644 --- a/interop/grpc_testing/benchmark_service_grpc.pb.go +++ b/interop/grpc_testing/benchmark_service_grpc.pb.go @@ -1,4 +1,25 @@ +// Copyright 
2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/testing/benchmark_service.proto package grpc_testing @@ -14,6 +35,14 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + BenchmarkService_UnaryCall_FullMethodName = "/grpc.testing.BenchmarkService/UnaryCall" + BenchmarkService_StreamingCall_FullMethodName = "/grpc.testing.BenchmarkService/StreamingCall" + BenchmarkService_StreamingFromClient_FullMethodName = "/grpc.testing.BenchmarkService/StreamingFromClient" + BenchmarkService_StreamingFromServer_FullMethodName = "/grpc.testing.BenchmarkService/StreamingFromServer" + BenchmarkService_StreamingBothWays_FullMethodName = "/grpc.testing.BenchmarkService/StreamingBothWays" +) + // BenchmarkServiceClient is the client API for BenchmarkService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -46,7 +75,7 @@ func NewBenchmarkServiceClient(cc grpc.ClientConnInterface) BenchmarkServiceClie func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, opts...) + err := c.cc.Invoke(ctx, BenchmarkService_UnaryCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -54,7 +83,7 @@ func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleReques } func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[0], "/grpc.testing.BenchmarkService/StreamingCall", opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[0], BenchmarkService_StreamingCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -85,7 +114,7 @@ func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) { } func (c *benchmarkServiceClient) StreamingFromClient(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingFromClientClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[1], "/grpc.testing.BenchmarkService/StreamingFromClient", opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[1], BenchmarkService_StreamingFromClient_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -119,7 +148,7 @@ func (x *benchmarkServiceStreamingFromClientClient) CloseAndRecv() (*SimpleRespo } func (c *benchmarkServiceClient) StreamingFromServer(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (BenchmarkService_StreamingFromServerClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[2], "/grpc.testing.BenchmarkService/StreamingFromServer", opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[2], BenchmarkService_StreamingFromServer_FullMethodName, opts...) if err != nil { return nil, err } @@ -151,7 +180,7 @@ func (x *benchmarkServiceStreamingFromServerClient) Recv() (*SimpleResponse, err } func (c *benchmarkServiceClient) StreamingBothWays(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingBothWaysClient, error) { - stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[3], "/grpc.testing.BenchmarkService/StreamingBothWays", opts...) + stream, err := c.cc.NewStream(ctx, &BenchmarkService_ServiceDesc.Streams[3], BenchmarkService_StreamingBothWays_FullMethodName, opts...) if err != nil { return nil, err } @@ -246,7 +275,7 @@ func _BenchmarkService_UnaryCall_Handler(srv interface{}, ctx context.Context, d } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.BenchmarkService/UnaryCall", + FullMethod: BenchmarkService_UnaryCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(BenchmarkServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) diff --git a/interop/grpc_testing/control.pb.go b/interop/grpc_testing/control.pb.go index 8db13921b77e..5524c8e972e8 100644 --- a/interop/grpc_testing/control.pb.go +++ b/interop/grpc_testing/control.pb.go @@ -14,16 +14,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/testing/control.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -35,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type ClientType int32 const ( @@ -299,6 +295,7 @@ type LoadParams struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Load: + // // *LoadParams_ClosedLoop // *LoadParams_Poisson Load isLoadParams_Load `protobuf_oneof:"load"` @@ -444,6 +441,7 @@ type ChannelArg struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Types that are assignable to Value: + // // *ChannelArg_StrValue // *ChannelArg_IntValue Value isChannelArg_Value `protobuf_oneof:"value"` @@ -833,6 +831,7 @@ type ClientArgs struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Argtype: + // // *ClientArgs_Setup // *ClientArgs_Mark Argtype isClientArgs_Argtype `protobuf_oneof:"argtype"` @@ -1060,6 +1059,7 @@ type ServerArgs struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Argtype: + // // *ServerArgs_Setup // *ServerArgs_Mark Argtype isServerArgs_Argtype `protobuf_oneof:"argtype"` @@ -1524,6 +1524,9 @@ type ScenarioResultSummary struct { // Queries per CPU-sec over all servers or clients ServerQueriesPerCpuSec float64 `protobuf:"fixed64,17,opt,name=server_queries_per_cpu_sec,json=serverQueriesPerCpuSec,proto3" json:"server_queries_per_cpu_sec,omitempty"` ClientQueriesPerCpuSec float64 
`protobuf:"fixed64,18,opt,name=client_queries_per_cpu_sec,json=clientQueriesPerCpuSec,proto3" json:"client_queries_per_cpu_sec,omitempty"` + // Start and end time for the test scenario + StartTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + EndTime *timestamppb.Timestamp `protobuf:"bytes,20,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` } func (x *ScenarioResultSummary) Reset() { @@ -1684,6 +1687,20 @@ func (x *ScenarioResultSummary) GetClientQueriesPerCpuSec() float64 { return 0 } +func (x *ScenarioResultSummary) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *ScenarioResultSummary) GetEndTime() *timestamppb.Timestamp { + if x != nil { + return x.EndTime + } + return nil +} + // Results of a single benchmark scenario. type ScenarioResult struct { state protoimpl.MessageState @@ -1813,297 +1830,308 @@ var file_grpc_testing_control_proto_rawDesc = []byte{ 0x2f, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x32, 0x0a, 0x0d, 0x50, 0x6f, 0x69, 0x73, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x6c, 0x6f, - 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x65, - 0x64, 0x4c, 0x6f, 0x61, 0x64, 0x22, 0x12, 0x0a, 0x10, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x4c, - 0x6f, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0a, 0x4c, 0x6f, - 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x6c, 0x6f, 0x73, - 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 
0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x6f, - 0x73, 0x65, 0x64, 0x4c, 0x6f, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, - 0x0a, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x4c, 0x6f, 0x6f, 0x70, 0x12, 0x37, 0x0a, 0x07, 0x70, - 0x6f, 0x69, 0x73, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x6f, 0x69, 0x73, - 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x07, 0x70, 0x6f, 0x69, - 0x73, 0x73, 0x6f, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x7f, 0x0a, 0x0e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1e, - 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x75, 0x73, 0x65, 0x54, 0x65, 0x73, 0x74, 0x43, 0x61, 0x12, 0x30, - 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6f, 0x76, - 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, - 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x72, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x72, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x22, 0x67, 0x0a, - 0x0a, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x1d, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, - 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x05, 0x48, 0x00, 0x52, 0x08, 0x69, 
0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xf6, 0x07, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x39, - 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x73, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0x3f, 0x0a, 0x1c, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, - 0x72, 0x70, 0x63, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x19, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x52, 0x70, 0x63, 0x73, 0x50, 0x65, 0x72, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, - 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, - 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x73, - 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 
0x68, 0x72, 0x65, 0x61, - 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x30, 0x0a, 0x08, - 0x72, 0x70, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x70, - 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x72, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, - 0x0a, 0x0b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0a, 0x6c, - 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, 0x0a, - 0x10, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x5f, - 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6f, 0x72, 0x65, - 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 
0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x4c, 0x69, - 0x6d, 0x69, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, - 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x70, 0x69, 0x12, 0x3b, 0x0a, - 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x10, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x52, 0x0b, 0x63, - 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x68, - 0x72, 0x65, 0x61, 0x64, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x71, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0c, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x50, 0x65, 0x72, 0x43, 0x71, - 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x12, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x12, 0x28, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x61, 0x6c, 0x65, 0x73, 0x63, 0x65, - 0x5f, 0x61, 0x70, 0x69, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x43, - 0x6f, 0x61, 0x6c, 0x65, 0x73, 0x63, 0x65, 0x41, 0x70, 0x69, 0x12, 0x58, 0x0a, 0x29, 0x6d, 0x65, - 0x64, 0x69, 0x61, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x05, 0x52, 0x25, 0x6d, - 0x65, 0x64, 0x69, 0x61, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 
0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4d, 0x69, - 0x6c, 0x6c, 0x69, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, - 0x3f, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x32, 0x0a, 0x0d, 0x50, 0x6f, 0x69, 0x73, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x6c, + 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x6f, 0x66, 0x66, 0x65, 0x72, + 0x65, 0x64, 0x4c, 0x6f, 0x61, 0x64, 0x22, 0x12, 0x0a, 0x10, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, + 0x4c, 0x6f, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0a, 0x4c, + 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x63, 0x6c, 0x6f, + 0x73, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, - 0x22, 0x1c, 0x0a, 0x04, 0x4d, 0x61, 0x72, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x73, 0x65, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x73, 0x65, 0x74, 0x22, 0x75, - 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x32, 0x0a, 0x05, - 0x73, 0x65, 0x74, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 
0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, - 0x12, 0x28, 0x0a, 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x61, - 0x72, 0x6b, 0x48, 0x00, 0x52, 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x61, 0x72, - 0x67, 0x74, 0x79, 0x70, 0x65, 0x22, 0xc0, 0x04, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x14, - 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x68, 0x72, - 0x65, 0x61, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, 0x73, 0x79, 0x6e, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x1d, - 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x09, 0x63, 
0x6f, 0x72, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x42, 0x0a, - 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x28, - 0x0a, 0x10, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, - 0x70, 0x69, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x70, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x65, - 0x61, 0x64, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x71, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0c, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x50, 0x65, 0x72, 0x43, 0x71, 0x12, 0x2f, - 0x0a, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x3c, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, - 0xea, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, - 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x29, 0x0a, - 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, - 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x73, 0x65, 
0x72, 0x76, 0x65, 0x72, 0x50, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x75, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x41, 0x72, 0x67, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x48, 0x00, 0x52, 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, 0x12, 0x28, 0x0a, 0x04, 0x6d, 0x61, - 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x61, 0x72, 0x6b, 0x48, 0x00, 0x52, 0x04, - 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x61, 0x72, 0x67, 0x74, 0x79, 0x70, 0x65, 0x22, - 0x69, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x22, 0x0d, 0x0a, 0x0b, 0x43, 0x6f, - 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x0c, 0x43, 0x6f, 0x72, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x22, - 0x06, 0x0a, 0x04, 0x56, 0x6f, 0x69, 0x64, 0x22, 0xef, 0x02, 0x0a, 0x08, 0x53, 0x63, 0x65, 0x6e, - 0x61, 0x72, 0x69, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 
0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, - 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6e, 0x75, 0x6d, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x0d, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, - 0x75, 0x6d, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, - 0x77, 0x61, 0x72, 0x6d, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x77, 0x61, 0x72, 0x6d, 0x75, 0x70, 0x53, 0x65, 0x63, 0x6f, - 0x6e, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, - 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, - 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x12, 0x37, 0x0a, 0x18, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, - 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x15, 0x73, 0x70, 0x61, 
0x77, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x41, 0x0a, 0x09, 0x53, 0x63, 0x65, - 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x73, 0x12, 0x34, 0x0a, 0x09, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, - 0x69, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, - 0x6f, 0x52, 0x09, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x73, 0x22, 0xbb, 0x06, 0x0a, - 0x15, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x03, 0x71, 0x70, 0x73, 0x12, 0x2d, 0x0a, 0x13, 0x71, 0x70, 0x73, 0x5f, - 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x71, 0x70, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x43, 0x6f, 0x72, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, 0x65, - 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, - 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, - 0x2c, 0x0a, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, - 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 
0x74, 0x69, 0x6d, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x55, - 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x5f, 0x35, 0x30, 0x18, 0x07, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x35, 0x30, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x5f, 0x39, 0x30, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x39, 0x30, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x5f, 0x39, 0x35, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, - 0x63, 0x79, 0x39, 0x35, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, - 0x39, 0x39, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x39, 0x39, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x39, - 0x39, 0x39, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, - 0x79, 0x39, 0x39, 0x39, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, - 0x70, 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, - 0x0a, 0x1e, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x1b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, - 0x75, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, - 0x6f, 0x6e, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x65, + 0x6f, 0x73, 0x65, 0x64, 0x4c, 0x6f, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, + 0x52, 0x0a, 0x63, 
0x6c, 0x6f, 0x73, 0x65, 0x64, 0x4c, 0x6f, 0x6f, 0x70, 0x12, 0x37, 0x0a, 0x07, + 0x70, 0x6f, 0x69, 0x73, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x6f, 0x69, + 0x73, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x07, 0x70, 0x6f, + 0x69, 0x73, 0x73, 0x6f, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x7f, 0x0a, + 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x1e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x75, 0x73, 0x65, 0x54, 0x65, 0x73, 0x74, 0x43, 0x61, 0x12, + 0x30, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6f, + 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x72, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x72, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x22, 0x67, + 0x0a, 0x0a, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x74, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x05, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xf6, 0x07, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 
0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, + 0x39, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x73, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x19, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x52, 0x70, 0x63, 0x73, 0x50, 0x65, 0x72, 0x43, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, + 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, + 0x61, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, 0x73, 0x79, 0x6e, 0x63, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x30, 
0x0a, + 0x08, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, + 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x72, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x39, 0x0a, 0x0b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0a, + 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x48, + 0x0a, 0x10, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x72, 0x65, + 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6f, 0x72, + 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 
0x61, 0x70, 0x69, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x6f, 0x74, 0x68, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x70, 0x69, 0x12, 0x3b, + 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x10, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x52, 0x0b, + 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, + 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x71, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0c, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x50, 0x65, 0x72, 0x43, + 0x71, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x12, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x61, 0x6c, 0x65, 0x73, 0x63, + 0x65, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x75, 0x73, 0x65, + 0x43, 0x6f, 0x61, 0x6c, 0x65, 0x73, 0x63, 0x65, 0x41, 0x70, 0x69, 0x12, 0x58, 0x0a, 0x29, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x6e, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x5f, 0x6d, 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x05, 0x52, 0x25, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4d, + 0x69, 0x6c, 0x6c, 0x69, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x15, 
0x20, 0x01, 0x28, 0x05, 0x52, + 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, + 0x22, 0x3f, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x22, 0x1c, 0x0a, 0x04, 0x4d, 0x61, 0x72, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x73, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x73, 0x65, 0x74, 0x22, + 0x75, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x32, 0x0a, + 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x05, 0x73, 0x65, 0x74, 0x75, + 0x70, 0x12, 0x28, 0x0a, 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4d, + 0x61, 0x72, 0x6b, 0x48, 0x00, 0x52, 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x61, + 0x72, 0x67, 0x74, 0x79, 0x70, 0x65, 0x22, 0xc0, 0x04, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, + 0x61, 
0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x30, 0x0a, + 0x14, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x68, + 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, 0x73, 0x79, + 0x6e, 0x63, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x42, + 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, + 0x28, 0x0a, 0x10, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x61, 0x70, 0x69, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x74, 0x68, 0x65, 0x72, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x70, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x68, 0x72, + 0x65, 0x61, 0x64, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x71, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0c, 0x74, 0x68, 0x72, 0x65, 
0x61, 0x64, 0x73, 0x50, 0x65, 0x72, 0x43, 0x71, 0x12, + 0x2f, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, + 0x61, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x3c, 0x0a, 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, + 0x18, 0xea, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, + 0x67, 0x52, 0x0b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x29, + 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x75, 0x0a, 0x0a, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x41, 0x72, 0x67, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x48, 0x00, 0x52, 0x05, 0x73, 0x65, 0x74, 0x75, 0x70, 0x12, 0x28, 0x0a, 0x04, 0x6d, + 0x61, 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x61, 0x72, 0x6b, 0x48, 0x00, 0x52, + 0x04, 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x61, 0x72, 0x67, 0x74, 0x79, 0x70, 0x65, + 0x22, 0x69, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 
0x67, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x22, 0x0d, 0x0a, 0x0b, 0x43, + 0x6f, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x0c, 0x43, 0x6f, + 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, + 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x63, 0x6f, 0x72, 0x65, 0x73, + 0x22, 0x06, 0x0a, 0x04, 0x56, 0x6f, 0x69, 0x64, 0x22, 0xef, 0x02, 0x0a, 0x08, 0x53, 0x63, 0x65, + 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x75, + 0x6d, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x6e, 0x75, 0x6d, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x0d, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b, + 0x6e, 0x75, 0x6d, 
0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, + 0x0e, 0x77, 0x61, 0x72, 0x6d, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x77, 0x61, 0x72, 0x6d, 0x75, 0x70, 0x53, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, + 0x6b, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x10, 0x62, 0x65, 0x6e, 0x63, 0x68, 0x6d, 0x61, 0x72, 0x6b, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x15, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x41, 0x0a, 0x09, 0x53, 0x63, + 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x73, 0x12, 0x34, 0x0a, 0x09, 0x73, 0x63, 0x65, 0x6e, 0x61, + 0x72, 0x69, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, + 0x69, 0x6f, 0x52, 0x09, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x73, 0x22, 0xad, 0x07, + 0x0a, 0x15, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x71, 0x70, 0x73, 0x12, 0x2d, 0x0a, 0x13, 0x71, 0x70, 0x73, + 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x72, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x71, 0x70, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x72, 0x65, 0x12, 
0x2c, 0x0a, 0x12, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, + 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x55, 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x5f, 0x35, 0x30, 0x18, 0x07, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x35, 0x30, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x5f, 0x39, 0x30, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x39, 0x30, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x39, 0x35, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x39, 0x35, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, + 0x5f, 0x39, 0x39, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x39, 0x39, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, + 0x39, 0x39, 0x39, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6c, 0x61, 0x74, 0x65, 
0x6e, + 0x63, 0x79, 0x39, 0x39, 0x39, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x63, 0x70, 0x75, 0x5f, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x70, 0x75, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x43, 0x0a, 0x1e, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, - 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x01, 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x12, 0x37, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x73, - 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x0f, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x6c, 0x73, 0x50, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x6c, 0x73, 0x50, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x71, 0x75, 0x65, - 0x72, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x65, 0x63, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x51, 0x75, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x50, 0x65, 0x72, 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x12, 0x3a, - 0x0a, 0x1a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, - 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x16, 0x63, 
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x50, 0x65, 0x72, 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x22, 0xf6, 0x03, 0x0a, 0x0e, 0x53, - 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, - 0x08, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, - 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x08, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, - 0x6f, 0x12, 0x39, 0x0a, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x3c, 0x0a, 0x0c, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0b, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x0c, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0b, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x07, 0x73, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x23, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x63, 0x65, 0x6e, - 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x07, 0x20, 0x03, - 0x28, 0x08, 0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x49, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x2a, 0x56, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, - 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, 0x43, 0x4c, - 0x49, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x4c, 0x4c, 0x42, 0x41, - 0x43, 0x4b, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x2a, 0x70, 0x0a, 0x0a, 0x53, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x59, 0x4e, - 0x43, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x53, - 0x59, 
0x4e, 0x43, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, - 0x41, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x49, 0x43, 0x5f, 0x53, 0x45, - 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x4c, 0x4c, - 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x04, 0x2a, 0x72, 0x0a, - 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x4e, 0x41, 0x52, - 0x59, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, - 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, - 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x19, 0x0a, - 0x15, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x52, 0x45, - 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x57, 0x41, 0x59, 0x53, 0x10, - 0x04, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x1b, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x66, 0x75, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, + 0x63, 0x6f, 0x6e, 0x64, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x01, 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x12, 0x37, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, + 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 
0x0f, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x6c, 0x73, + 0x50, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x01, 0x52, 0x15, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x50, 0x6f, 0x6c, 0x6c, 0x73, 0x50, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x71, 0x75, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x65, + 0x63, 0x18, 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x51, + 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x50, 0x65, 0x72, 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x12, + 0x3a, 0x0a, 0x1a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x51, 0x75, 0x65, 0x72, 0x69, + 0x65, 0x73, 0x50, 0x65, 0x72, 0x43, 0x70, 0x75, 0x53, 0x65, 0x63, 0x12, 0x39, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xf6, 0x03, + 0x0a, 0x0e, 0x53, 0x63, 
0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x08, 0x73, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x08, 0x73, 0x63, 0x65, 0x6e, + 0x61, 0x72, 0x69, 0x6f, 0x12, 0x39, 0x0a, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, + 0x3c, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x3c, 0x0a, + 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0b, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x3d, + 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, + 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 
0x75, 0x6c, 0x74, 0x53, 0x75, 0x6d, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x25, 0x0a, + 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x08, 0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x49, 0x0a, 0x0f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2a, 0x56, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x43, + 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x54, 0x48, 0x45, 0x52, + 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x41, 0x4c, + 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x2a, 0x70, + 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x00, 0x12, 0x10, 0x0a, + 0x0c, 0x41, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x01, 0x12, + 0x18, 0x0a, 0x14, 0x41, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x49, 0x43, + 
0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x54, 0x48, + 0x45, 0x52, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x43, + 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x04, + 0x2a, 0x72, 0x0a, 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x55, + 0x4e, 0x41, 0x52, 0x59, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, + 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, + 0x4e, 0x47, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x02, + 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x52, + 0x4f, 0x4d, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x53, + 0x54, 0x52, 0x45, 0x41, 0x4d, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x57, 0x41, + 0x59, 0x53, 0x10, 0x04, 0x42, 0x21, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2147,8 +2175,9 @@ var file_grpc_testing_control_proto_goTypes = []interface{}{ (*HistogramParams)(nil), // 23: grpc.testing.HistogramParams (*ClientStats)(nil), // 24: grpc.testing.ClientStats (*ServerStats)(nil), // 25: grpc.testing.ServerStats - (*HistogramData)(nil), // 26: grpc.testing.HistogramData - (*RequestResultCount)(nil), // 27: grpc.testing.RequestResultCount + (*timestamppb.Timestamp)(nil), // 26: google.protobuf.Timestamp + (*HistogramData)(nil), // 27: grpc.testing.HistogramData + (*RequestResultCount)(nil), // 28: grpc.testing.RequestResultCount } var file_grpc_testing_control_proto_depIdxs = []int32{ 4, // 0: grpc.testing.LoadParams.closed_loop:type_name -> grpc.testing.ClosedLoopParams @@ 
-2173,17 +2202,19 @@ var file_grpc_testing_control_proto_depIdxs = []int32{ 8, // 19: grpc.testing.Scenario.client_config:type_name -> grpc.testing.ClientConfig 12, // 20: grpc.testing.Scenario.server_config:type_name -> grpc.testing.ServerConfig 18, // 21: grpc.testing.Scenarios.scenarios:type_name -> grpc.testing.Scenario - 18, // 22: grpc.testing.ScenarioResult.scenario:type_name -> grpc.testing.Scenario - 26, // 23: grpc.testing.ScenarioResult.latencies:type_name -> grpc.testing.HistogramData - 24, // 24: grpc.testing.ScenarioResult.client_stats:type_name -> grpc.testing.ClientStats - 25, // 25: grpc.testing.ScenarioResult.server_stats:type_name -> grpc.testing.ServerStats - 20, // 26: grpc.testing.ScenarioResult.summary:type_name -> grpc.testing.ScenarioResultSummary - 27, // 27: grpc.testing.ScenarioResult.request_results:type_name -> grpc.testing.RequestResultCount - 28, // [28:28] is the sub-list for method output_type - 28, // [28:28] is the sub-list for method input_type - 28, // [28:28] is the sub-list for extension type_name - 28, // [28:28] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 26, // 22: grpc.testing.ScenarioResultSummary.start_time:type_name -> google.protobuf.Timestamp + 26, // 23: grpc.testing.ScenarioResultSummary.end_time:type_name -> google.protobuf.Timestamp + 18, // 24: grpc.testing.ScenarioResult.scenario:type_name -> grpc.testing.Scenario + 27, // 25: grpc.testing.ScenarioResult.latencies:type_name -> grpc.testing.HistogramData + 24, // 26: grpc.testing.ScenarioResult.client_stats:type_name -> grpc.testing.ClientStats + 25, // 27: grpc.testing.ScenarioResult.server_stats:type_name -> grpc.testing.ServerStats + 20, // 28: grpc.testing.ScenarioResult.summary:type_name -> grpc.testing.ScenarioResultSummary + 28, // 29: grpc.testing.ScenarioResult.request_results:type_name -> grpc.testing.RequestResultCount + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the 
sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name } func init() { file_grpc_testing_control_proto_init() } diff --git a/interop/grpc_testing/core/stats.pb.go b/interop/grpc_testing/core/stats.pb.go index e5652dc78cc3..f6cc0f1daff0 100644 --- a/interop/grpc_testing/core/stats.pb.go +++ b/interop/grpc_testing/core/stats.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/core/stats.proto -package grpc_core +package core import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type Bucket struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -148,6 +143,7 @@ type Metric struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Types that are assignable to Value: + // // *Metric_Count // *Metric_Histogram Value isMetric_Value `protobuf_oneof:"value"` diff --git a/interop/grpc_testing/empty.pb.go b/interop/grpc_testing/empty.pb.go index 5378d2c58d0f..d23993223dba 100644 --- a/interop/grpc_testing/empty.pb.go +++ b/interop/grpc_testing/empty.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/testing/empty.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,18 +34,13 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // An empty message that you can re-use to avoid defining duplicated empty // messages in your project. A typical example is to use it as argument or the // return value of a service API. For instance: // -// service Foo { -// rpc Bar (grpc.testing.Empty) returns (grpc.testing.Empty) { }; -// }; -// +// service Foo { +// rpc Bar (grpc.testing.Empty) returns (grpc.testing.Empty) { }; +// }; type Empty struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -91,7 +85,10 @@ var file_grpc_testing_empty_proto_rawDesc = []byte{ 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x79, 0x42, 0x2a, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x0b, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/interop/grpc_testing/messages.pb.go b/interop/grpc_testing/messages.pb.go index f956a5ad771f..ccc27a936697 100644 --- 
a/interop/grpc_testing/messages.pb.go +++ b/interop/grpc_testing/messages.pb.go @@ -16,14 +16,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/testing/messages.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The type of payload that should be returned. type PayloadType int32 @@ -387,6 +382,8 @@ type SimpleRequest struct { FillServerId bool `protobuf:"varint,9,opt,name=fill_server_id,json=fillServerId,proto3" json:"fill_server_id,omitempty"` // Whether SimpleResponse should include grpclb_route_type. FillGrpclbRouteType bool `protobuf:"varint,10,opt,name=fill_grpclb_route_type,json=fillGrpclbRouteType,proto3" json:"fill_grpclb_route_type,omitempty"` + // If set the server should record this metrics report data for the current RPC. + OrcaPerQueryReport *TestOrcaReport `protobuf:"bytes,11,opt,name=orca_per_query_report,json=orcaPerQueryReport,proto3" json:"orca_per_query_report,omitempty"` } func (x *SimpleRequest) Reset() { @@ -491,6 +488,13 @@ func (x *SimpleRequest) GetFillGrpclbRouteType() bool { return false } +func (x *SimpleRequest) GetOrcaPerQueryReport() *TestOrcaReport { + if x != nil { + return x.OrcaPerQueryReport + } + return nil +} + // Unary response, as configured by the request. 
type SimpleResponse struct { state protoimpl.MessageState @@ -785,6 +789,8 @@ type StreamingOutputCallRequest struct { Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` // Whether server should return a given status ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus,proto3" json:"response_status,omitempty"` + // If set the server should update this metrics report data at the OOB server. + OrcaOobReport *TestOrcaReport `protobuf:"bytes,8,opt,name=orca_oob_report,json=orcaOobReport,proto3" json:"orca_oob_report,omitempty"` } func (x *StreamingOutputCallRequest) Reset() { @@ -847,6 +853,13 @@ func (x *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus { return nil } +func (x *StreamingOutputCallRequest) GetOrcaOobReport() *TestOrcaReport { + if x != nil { + return x.OrcaOobReport + } + return nil +} + // Server-streaming response, as configured by the request and parameters. type StreamingOutputCallResponse struct { state protoimpl.MessageState @@ -1173,17 +1186,17 @@ type LoadBalancerAccumulatedStatsResponse struct { // The total number of RPCs have ever issued for each type. // Deprecated: use stats_per_method.rpcs_started instead. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in grpc/testing/messages.proto. NumRpcsStartedByMethod map[string]int32 `protobuf:"bytes,1,rep,name=num_rpcs_started_by_method,json=numRpcsStartedByMethod,proto3" json:"num_rpcs_started_by_method,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // The total number of RPCs have ever completed successfully for each type. // Deprecated: use stats_per_method.result instead. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in grpc/testing/messages.proto. 
NumRpcsSucceededByMethod map[string]int32 `protobuf:"bytes,2,rep,name=num_rpcs_succeeded_by_method,json=numRpcsSucceededByMethod,proto3" json:"num_rpcs_succeeded_by_method,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // The total number of RPCs have ever failed for each type. // Deprecated: use stats_per_method.result instead. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in grpc/testing/messages.proto. NumRpcsFailedByMethod map[string]int32 `protobuf:"bytes,3,rep,name=num_rpcs_failed_by_method,json=numRpcsFailedByMethod,proto3" json:"num_rpcs_failed_by_method,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // Per-method RPC statistics. The key is the RpcType in string form; e.g. // 'EMPTY_CALL' or 'UNARY_CALL' @@ -1222,7 +1235,7 @@ func (*LoadBalancerAccumulatedStatsResponse) Descriptor() ([]byte, []int) { return file_grpc_testing_messages_proto_rawDescGZIP(), []int{15} } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in grpc/testing/messages.proto. func (x *LoadBalancerAccumulatedStatsResponse) GetNumRpcsStartedByMethod() map[string]int32 { if x != nil { return x.NumRpcsStartedByMethod @@ -1230,7 +1243,7 @@ func (x *LoadBalancerAccumulatedStatsResponse) GetNumRpcsStartedByMethod() map[s return nil } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in grpc/testing/messages.proto. func (x *LoadBalancerAccumulatedStatsResponse) GetNumRpcsSucceededByMethod() map[string]int32 { if x != nil { return x.NumRpcsSucceededByMethod @@ -1238,7 +1251,7 @@ func (x *LoadBalancerAccumulatedStatsResponse) GetNumRpcsSucceededByMethod() map return nil } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in grpc/testing/messages.proto. 
func (x *LoadBalancerAccumulatedStatsResponse) GetNumRpcsFailedByMethod() map[string]int32 { if x != nil { return x.NumRpcsFailedByMethod @@ -1360,6 +1373,80 @@ func (*ClientConfigureResponse) Descriptor() ([]byte, []int) { return file_grpc_testing_messages_proto_rawDescGZIP(), []int{17} } +// Metrics data the server will update and send to the client. It mirrors orca load report +// https://github.com/cncf/xds/blob/eded343319d09f30032952beda9840bbd3dcf7ac/xds/data/orca/v3/orca_load_report.proto#L15, +// but avoids orca dependency. Used by both per-query and out-of-band reporting tests. +type TestOrcaReport struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CpuUtilization float64 `protobuf:"fixed64,1,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"` + MemoryUtilization float64 `protobuf:"fixed64,2,opt,name=memory_utilization,json=memoryUtilization,proto3" json:"memory_utilization,omitempty"` + RequestCost map[string]float64 `protobuf:"bytes,3,rep,name=request_cost,json=requestCost,proto3" json:"request_cost,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` + Utilization map[string]float64 `protobuf:"bytes,4,rep,name=utilization,proto3" json:"utilization,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"` +} + +func (x *TestOrcaReport) Reset() { + *x = TestOrcaReport{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_testing_messages_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TestOrcaReport) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestOrcaReport) ProtoMessage() {} + +func (x *TestOrcaReport) ProtoReflect() protoreflect.Message { + mi := &file_grpc_testing_messages_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestOrcaReport.ProtoReflect.Descriptor instead. +func (*TestOrcaReport) Descriptor() ([]byte, []int) { + return file_grpc_testing_messages_proto_rawDescGZIP(), []int{18} +} + +func (x *TestOrcaReport) GetCpuUtilization() float64 { + if x != nil { + return x.CpuUtilization + } + return 0 +} + +func (x *TestOrcaReport) GetMemoryUtilization() float64 { + if x != nil { + return x.MemoryUtilization + } + return 0 +} + +func (x *TestOrcaReport) GetRequestCost() map[string]float64 { + if x != nil { + return x.RequestCost + } + return nil +} + +func (x *TestOrcaReport) GetUtilization() map[string]float64 { + if x != nil { + return x.Utilization + } + return nil +} + type LoadBalancerStatsResponse_RpcsByPeer struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1372,7 +1459,7 @@ type LoadBalancerStatsResponse_RpcsByPeer struct { func (x *LoadBalancerStatsResponse_RpcsByPeer) Reset() { *x = LoadBalancerStatsResponse_RpcsByPeer{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[18] + mi := &file_grpc_testing_messages_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1385,7 +1472,7 @@ func (x *LoadBalancerStatsResponse_RpcsByPeer) String() string { func (*LoadBalancerStatsResponse_RpcsByPeer) ProtoMessage() {} func (x *LoadBalancerStatsResponse_RpcsByPeer) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[18] + mi := &file_grpc_testing_messages_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1423,7 +1510,7 @@ type LoadBalancerAccumulatedStatsResponse_MethodStats struct { func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) Reset() { *x = 
LoadBalancerAccumulatedStatsResponse_MethodStats{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[25] + mi := &file_grpc_testing_messages_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1436,7 +1523,7 @@ func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) String() string { func (*LoadBalancerAccumulatedStatsResponse_MethodStats) ProtoMessage() {} func (x *LoadBalancerAccumulatedStatsResponse_MethodStats) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[25] + mi := &file_grpc_testing_messages_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1480,7 +1567,7 @@ type ClientConfigureRequest_Metadata struct { func (x *ClientConfigureRequest_Metadata) Reset() { *x = ClientConfigureRequest_Metadata{} if protoimpl.UnsafeEnabled { - mi := &file_grpc_testing_messages_proto_msgTypes[28] + mi := &file_grpc_testing_messages_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1493,7 +1580,7 @@ func (x *ClientConfigureRequest_Metadata) String() string { func (*ClientConfigureRequest_Metadata) ProtoMessage() {} func (x *ClientConfigureRequest_Metadata) ProtoReflect() protoreflect.Message { - mi := &file_grpc_testing_messages_proto_msgTypes[28] + mi := &file_grpc_testing_messages_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1546,7 +1633,7 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x45, 0x63, 0x68, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xa2, 0x04, 0x0a, 0x0d, 0x53, 0x69, 0x6d, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xf3, 0x04, 0x0a, 0x0d, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, @@ -1580,232 +1667,268 @@ var file_grpc_testing_messages_proto_rawDesc = []byte{ 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x6c, 0x47, 0x72, - 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x82, 0x02, - 0x0a, 0x0e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, 0x67, - 0x72, 0x70, 0x63, 0x6c, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x74, 
0x79, 0x70, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, - 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x43, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0x54, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, - 0x65, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x82, 0x01, - 0x0a, 0x12, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x55, 0x73, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4f, 0x0a, + 0x15, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, + 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x12, 0x6f, 0x72, 0x63, 0x61, + 0x50, 0x65, 0x72, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x82, + 0x02, 0x0a, 0x0e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x11, + 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, + 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, + 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, + 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x65, 0x64, 0x22, 0xa3, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 
- 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x41, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x63, - 0x68, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x4e, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x4a, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 
0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x6d, - 0x61, 0x78, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x63, - 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, - 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x6f, - 0x66, 0x66, 0x4d, 0x73, 0x22, 0x46, 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x12, 0x1d, 0x0a, - 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x05, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x73, 0x22, 0x56, 0x0a, 0x18, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x43, 0x6f, + 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0x54, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x82, + 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x55, 0x73, 0x12, 0x37, 
0x0a, 0x0a, 0x63, 0x6f, + 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x42, 0x6f, + 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x64, 0x22, 0xe9, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x41, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, + 0x63, 0x68, 
0x6f, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x44, 0x0a, 0x0f, 0x6f, 0x72, 0x63, + 0x61, 0x5f, 0x6f, 0x6f, 0x62, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x52, 0x0d, 0x6f, 0x72, 0x63, 0x61, 0x4f, 0x6f, 0x62, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x22, + 0x4e, 0x0a, 0x1b, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, + 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, + 0x4a, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6d, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4d, 0x73, 0x22, 0x46, 0x0a, 0x0d, 0x52, + 0x65, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, + 0x73, 0x73, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, + 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, + 0x66, 0x4d, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x4c, 
0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x22, 0xe2, 0x04, 0x0a, 0x19, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, 0x5f, - 0x72, 0x70, 0x63, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x6e, 0x75, 0x6d, 0x52, - 0x70, 0x63, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, - 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x53, 0x65, 0x63, 0x22, 0xe2, 0x04, 0x0a, 0x19, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x21, 0x0a, - 0x0c, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 
0x73, - 0x12, 0x5f, 0x0a, 0x0e, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x1a, 0xb1, 0x01, 0x0a, 0x0a, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, - 0x12, 0x64, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, - 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, - 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x3d, 0x0a, 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, - 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, - 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x73, 0x0a, 0x11, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x23, 0x4c, 0x6f, 0x61, - 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, - 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x86, 0x09, 0x0a, 0x24, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x1a, 0x6e, 0x75, - 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, - 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, - 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 
0x74, 0x72, 0x79, 0x42, 0x02, - 0x18, 0x01, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x94, 0x01, 0x0a, 0x1c, 0x6e, - 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, - 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x50, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, - 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, - 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x18, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, - 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x12, 0x8b, 0x01, 0x0a, 0x19, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x66, - 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0c, 0x72, 0x70, 0x63, + 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, + 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, + 0x50, 
0x65, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x0e, 0x72, 0x70, 0x63, 0x73, 0x5f, + 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, + 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x70, 0x63, 0x73, + 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0xb1, 0x01, 0x0a, 0x0a, 0x52, 0x70, 0x63, + 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x64, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, + 0x62, 0x79, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, + 0x72, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x3d, 0x0a, + 0x0f, 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, + 0x52, 0x70, 0x63, 0x73, 0x42, 0x79, 0x50, 
0x65, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x73, 0x0a, 0x11, 0x52, + 0x70, 0x63, 0x73, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x48, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x70, 0x63, 0x73, 0x42, + 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x25, 0x0a, 0x23, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, + 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x86, 0x09, 0x0a, 0x24, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, + 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x8e, 0x01, 0x0a, 0x1a, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 
0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, - 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x15, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, - 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, - 0x70, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, - 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, - 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x1a, 0x49, 0x0a, 0x1b, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x1d, - 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, - 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x48, 0x0a, 0x1a, 0x4e, 0x75, 0x6d, - 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, + 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x52, 0x70, + 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x12, 0x94, 0x01, 0x0a, 0x1c, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x6d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x50, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x75, 0x6d, + 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x18, + 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, + 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x8b, 0x01, 0x0a, 0x19, 0x6e, 0x75, 0x6d, + 0x5f, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, + 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, + 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 
0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x15, 0x6e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x42, 0x79, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x70, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x46, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, + 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x73, 0x50, + 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x49, 0x0a, 0x1b, 0x4e, 0x75, 0x6d, 0x52, + 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0xcf, 0x01, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x73, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x62, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x65, 
0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x81, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, - 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, - 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, - 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe9, 0x02, 0x0a, 0x16, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, + 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x1d, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x53, 0x75, + 0x63, 0x63, 0x65, 0x65, 0x64, 0x65, 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x48, 0x0a, 0x1a, 0x4e, 0x75, 0x6d, 0x52, 0x70, 0x63, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x42, 0x79, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xcf, 0x01, 0x0a, 0x0b, 0x4d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x70, + 0x63, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0b, 0x72, 0x70, 0x63, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x62, 0x0a, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, + 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, + 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 
0x2e, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x81, 0x01, 0x0a, + 0x13, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x4c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x72, 0x41, 0x63, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0xe9, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x05, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, - 0x65, 0x63, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x53, 0x65, 0x63, 0x1a, 0x74, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x40, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x29, 0x0a, 0x07, 0x52, 0x70, - 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x43, - 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x43, - 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x22, 0x19, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2a, 0x1f, 0x0a, 0x0b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x10, 0x0a, 0x0c, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x10, - 0x00, 0x2a, 0x6f, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, - 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, - 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, 0x4c, 0x4c, 0x42, 0x41, 0x43, - 0x4b, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 
0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, - 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, - 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x49, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x1a, 0x74, 0x0a, 0x08, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x70, 0x63, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0x29, 0x0a, 0x07, 0x52, 0x70, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, + 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, + 0x55, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x01, 0x22, 0x19, 0x0a, 0x17, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8b, 0x03, 0x0a, 0x0e, 0x54, 0x65, 0x73, 0x74, + 0x4f, 0x72, 0x63, 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x70, + 0x75, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x75, 0x74, + 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x11, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, + 0x73, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, 0x61, + 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, + 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x43, 0x6f, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x4f, 0x72, 0x63, + 0x61, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 
0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x1f, 0x0a, 0x0b, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, + 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x2a, 0x6f, 0x0a, 0x0f, 0x47, 0x72, 0x70, 0x63, 0x6c, 0x62, + 0x52, 0x6f, 0x75, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, + 0x43, 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x47, 0x52, 0x50, 0x43, + 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x41, + 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x52, 0x50, 0x43, + 0x4c, 0x42, 0x5f, 0x52, 0x4f, 0x55, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, + 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x10, 0x02, 0x42, 0x1d, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1821,7 +1944,7 @@ func file_grpc_testing_messages_proto_rawDescGZIP() []byte { } var file_grpc_testing_messages_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_grpc_testing_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 29) +var file_grpc_testing_messages_proto_msgTypes = 
make([]protoimpl.MessageInfo, 32) var file_grpc_testing_messages_proto_goTypes = []interface{}{ (PayloadType)(0), // 0: grpc.testing.PayloadType (GrpclbRouteType)(0), // 1: grpc.testing.GrpclbRouteType @@ -1844,17 +1967,20 @@ var file_grpc_testing_messages_proto_goTypes = []interface{}{ (*LoadBalancerAccumulatedStatsResponse)(nil), // 18: grpc.testing.LoadBalancerAccumulatedStatsResponse (*ClientConfigureRequest)(nil), // 19: grpc.testing.ClientConfigureRequest (*ClientConfigureResponse)(nil), // 20: grpc.testing.ClientConfigureResponse - (*LoadBalancerStatsResponse_RpcsByPeer)(nil), // 21: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer - nil, // 22: grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry - nil, // 23: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry - nil, // 24: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry - nil, // 25: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry - nil, // 26: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry - nil, // 27: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry - (*LoadBalancerAccumulatedStatsResponse_MethodStats)(nil), // 28: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats - nil, // 29: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry - nil, // 30: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry - (*ClientConfigureRequest_Metadata)(nil), // 31: grpc.testing.ClientConfigureRequest.Metadata + (*TestOrcaReport)(nil), // 21: grpc.testing.TestOrcaReport + (*LoadBalancerStatsResponse_RpcsByPeer)(nil), // 22: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer + nil, // 23: grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry + nil, // 24: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry + nil, // 25: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry + nil, // 26: 
grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry + nil, // 27: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry + nil, // 28: grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry + (*LoadBalancerAccumulatedStatsResponse_MethodStats)(nil), // 29: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats + nil, // 30: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry + nil, // 31: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry + (*ClientConfigureRequest_Metadata)(nil), // 32: grpc.testing.ClientConfigureRequest.Metadata + nil, // 33: grpc.testing.TestOrcaReport.RequestCostEntry + nil, // 34: grpc.testing.TestOrcaReport.UtilizationEntry } var file_grpc_testing_messages_proto_depIdxs = []int32{ 0, // 0: grpc.testing.Payload.type:type_name -> grpc.testing.PayloadType @@ -1863,34 +1989,38 @@ var file_grpc_testing_messages_proto_depIdxs = []int32{ 3, // 3: grpc.testing.SimpleRequest.response_compressed:type_name -> grpc.testing.BoolValue 5, // 4: grpc.testing.SimpleRequest.response_status:type_name -> grpc.testing.EchoStatus 3, // 5: grpc.testing.SimpleRequest.expect_compressed:type_name -> grpc.testing.BoolValue - 4, // 6: grpc.testing.SimpleResponse.payload:type_name -> grpc.testing.Payload - 1, // 7: grpc.testing.SimpleResponse.grpclb_route_type:type_name -> grpc.testing.GrpclbRouteType - 4, // 8: grpc.testing.StreamingInputCallRequest.payload:type_name -> grpc.testing.Payload - 3, // 9: grpc.testing.StreamingInputCallRequest.expect_compressed:type_name -> grpc.testing.BoolValue - 3, // 10: grpc.testing.ResponseParameters.compressed:type_name -> grpc.testing.BoolValue - 0, // 11: grpc.testing.StreamingOutputCallRequest.response_type:type_name -> grpc.testing.PayloadType - 10, // 12: grpc.testing.StreamingOutputCallRequest.response_parameters:type_name -> grpc.testing.ResponseParameters - 4, // 13: 
grpc.testing.StreamingOutputCallRequest.payload:type_name -> grpc.testing.Payload - 5, // 14: grpc.testing.StreamingOutputCallRequest.response_status:type_name -> grpc.testing.EchoStatus - 4, // 15: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload - 22, // 16: grpc.testing.LoadBalancerStatsResponse.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry - 23, // 17: grpc.testing.LoadBalancerStatsResponse.rpcs_by_method:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry - 25, // 18: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_started_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry - 26, // 19: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_succeeded_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry - 27, // 20: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_failed_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry - 29, // 21: grpc.testing.LoadBalancerAccumulatedStatsResponse.stats_per_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry - 2, // 22: grpc.testing.ClientConfigureRequest.types:type_name -> grpc.testing.ClientConfigureRequest.RpcType - 31, // 23: grpc.testing.ClientConfigureRequest.metadata:type_name -> grpc.testing.ClientConfigureRequest.Metadata - 24, // 24: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry - 21, // 25: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer - 30, // 26: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.result:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry - 28, // 27: 
grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry.value:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats - 2, // 28: grpc.testing.ClientConfigureRequest.Metadata.type:type_name -> grpc.testing.ClientConfigureRequest.RpcType - 29, // [29:29] is the sub-list for method output_type - 29, // [29:29] is the sub-list for method input_type - 29, // [29:29] is the sub-list for extension type_name - 29, // [29:29] is the sub-list for extension extendee - 0, // [0:29] is the sub-list for field type_name + 21, // 6: grpc.testing.SimpleRequest.orca_per_query_report:type_name -> grpc.testing.TestOrcaReport + 4, // 7: grpc.testing.SimpleResponse.payload:type_name -> grpc.testing.Payload + 1, // 8: grpc.testing.SimpleResponse.grpclb_route_type:type_name -> grpc.testing.GrpclbRouteType + 4, // 9: grpc.testing.StreamingInputCallRequest.payload:type_name -> grpc.testing.Payload + 3, // 10: grpc.testing.StreamingInputCallRequest.expect_compressed:type_name -> grpc.testing.BoolValue + 3, // 11: grpc.testing.ResponseParameters.compressed:type_name -> grpc.testing.BoolValue + 0, // 12: grpc.testing.StreamingOutputCallRequest.response_type:type_name -> grpc.testing.PayloadType + 10, // 13: grpc.testing.StreamingOutputCallRequest.response_parameters:type_name -> grpc.testing.ResponseParameters + 4, // 14: grpc.testing.StreamingOutputCallRequest.payload:type_name -> grpc.testing.Payload + 5, // 15: grpc.testing.StreamingOutputCallRequest.response_status:type_name -> grpc.testing.EchoStatus + 21, // 16: grpc.testing.StreamingOutputCallRequest.orca_oob_report:type_name -> grpc.testing.TestOrcaReport + 4, // 17: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload + 23, // 18: grpc.testing.LoadBalancerStatsResponse.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeerEntry + 24, // 19: grpc.testing.LoadBalancerStatsResponse.rpcs_by_method:type_name -> 
grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry + 26, // 20: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_started_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsStartedByMethodEntry + 27, // 21: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_succeeded_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsSucceededByMethodEntry + 28, // 22: grpc.testing.LoadBalancerAccumulatedStatsResponse.num_rpcs_failed_by_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.NumRpcsFailedByMethodEntry + 30, // 23: grpc.testing.LoadBalancerAccumulatedStatsResponse.stats_per_method:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry + 2, // 24: grpc.testing.ClientConfigureRequest.types:type_name -> grpc.testing.ClientConfigureRequest.RpcType + 32, // 25: grpc.testing.ClientConfigureRequest.metadata:type_name -> grpc.testing.ClientConfigureRequest.Metadata + 33, // 26: grpc.testing.TestOrcaReport.request_cost:type_name -> grpc.testing.TestOrcaReport.RequestCostEntry + 34, // 27: grpc.testing.TestOrcaReport.utilization:type_name -> grpc.testing.TestOrcaReport.UtilizationEntry + 25, // 28: grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.rpcs_by_peer:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer.RpcsByPeerEntry + 22, // 29: grpc.testing.LoadBalancerStatsResponse.RpcsByMethodEntry.value:type_name -> grpc.testing.LoadBalancerStatsResponse.RpcsByPeer + 31, // 30: grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.result:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats.ResultEntry + 29, // 31: grpc.testing.LoadBalancerAccumulatedStatsResponse.StatsPerMethodEntry.value:type_name -> grpc.testing.LoadBalancerAccumulatedStatsResponse.MethodStats + 2, // 32: grpc.testing.ClientConfigureRequest.Metadata.type:type_name -> grpc.testing.ClientConfigureRequest.RpcType + 33, // [33:33] is the sub-list for 
method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name } func init() { file_grpc_testing_messages_proto_init() } @@ -2116,6 +2246,18 @@ func file_grpc_testing_messages_proto_init() { } } file_grpc_testing_messages_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TestOrcaReport); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_testing_messages_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LoadBalancerStatsResponse_RpcsByPeer); i { case 0: return &v.state @@ -2127,7 +2269,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LoadBalancerAccumulatedStatsResponse_MethodStats); i { case 0: return &v.state @@ -2139,7 +2281,7 @@ func file_grpc_testing_messages_proto_init() { return nil } } - file_grpc_testing_messages_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_grpc_testing_messages_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ClientConfigureRequest_Metadata); i { case 0: return &v.state @@ -2158,7 +2300,7 @@ func file_grpc_testing_messages_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_grpc_testing_messages_proto_rawDesc, NumEnums: 3, - NumMessages: 29, + NumMessages: 32, NumExtensions: 0, NumServices: 0, }, diff --git a/interop/grpc_testing/payloads.pb.go b/interop/grpc_testing/payloads.pb.go index b85a289c1532..6c4af8a1aab8 100644 --- a/interop/grpc_testing/payloads.pb.go +++ 
b/interop/grpc_testing/payloads.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/testing/payloads.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type ByteBufferParams struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -193,6 +188,7 @@ type PayloadConfig struct { unknownFields protoimpl.UnknownFields // Types that are assignable to Payload: + // // *PayloadConfig_BytebufParams // *PayloadConfig_SimpleParams // *PayloadConfig_ComplexParams @@ -313,7 +309,9 @@ var file_grpc_testing_payloads_proto_rawDesc = []byte{ 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x0d, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x78, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x61, 0x64, 0x42, 0x22, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x0d, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/interop/grpc_testing/report_qps_scenario_service.pb.go b/interop/grpc_testing/report_qps_scenario_service.pb.go index 0f4de5984942..831c38daaba7 100644 --- a/interop/grpc_testing/report_qps_scenario_service.pb.go +++ 
b/interop/grpc_testing/report_qps_scenario_service.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/testing/report_qps_scenario_service.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - var File_grpc_testing_report_qps_scenario_service_proto protoreflect.FileDescriptor var file_grpc_testing_report_qps_scenario_service_proto_rawDesc = []byte{ @@ -55,8 +50,11 @@ var file_grpc_testing_report_qps_scenario_service_proto_rawDesc = []byte{ 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x12, 0x1c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x42, 0x32, 0x0a, 0x0f, 0x69, 0x6f, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x1d, 0x52, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x51, 0x70, 0x73, 0x53, 0x63, 0x65, 0x6e, 0x61, 0x72, 0x69, 0x6f, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_grpc_testing_report_qps_scenario_service_proto_goTypes = []interface{}{ diff --git a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go 
b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go index b0fe8c8f5ee5..33392bc6ae33 100644 --- a/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go +++ b/interop/grpc_testing/report_qps_scenario_service_grpc.pb.go @@ -1,4 +1,25 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/testing/report_qps_scenario_service.proto package grpc_testing @@ -14,6 +35,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + ReportQpsScenarioService_ReportScenario_FullMethodName = "/grpc.testing.ReportQpsScenarioService/ReportScenario" +) + // ReportQpsScenarioServiceClient is the client API for ReportQpsScenarioService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -32,7 +57,7 @@ func NewReportQpsScenarioServiceClient(cc grpc.ClientConnInterface) ReportQpsSce func (c *reportQpsScenarioServiceClient) ReportScenario(ctx context.Context, in *ScenarioResult, opts ...grpc.CallOption) (*Void, error) { out := new(Void) - err := c.cc.Invoke(ctx, "/grpc.testing.ReportQpsScenarioService/ReportScenario", in, out, opts...) + err := c.cc.Invoke(ctx, ReportQpsScenarioService_ReportScenario_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -79,7 +104,7 @@ func _ReportQpsScenarioService_ReportScenario_Handler(srv interface{}, ctx conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.ReportQpsScenarioService/ReportScenario", + FullMethod: ReportQpsScenarioService_ReportScenario_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReportQpsScenarioServiceServer).ReportScenario(ctx, req.(*ScenarioResult)) diff --git a/interop/grpc_testing/stats.pb.go b/interop/grpc_testing/stats.pb.go index 3ff0ccd80b28..9d184f978fbb 100644 --- a/interop/grpc_testing/stats.pb.go +++ b/interop/grpc_testing/stats.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/testing/stats.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" core "google.golang.org/grpc/interop/grpc_testing/core" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type ServerStats struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -507,8 +502,10 @@ var file_grpc_testing_stats_proto_rawDesc = []byte{ 0x63, 0x71, 0x50, 0x6f, 0x6c, 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x0a, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x52, 0x09, 0x63, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x1f, 0x0a, 0x0f, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, + 0x0a, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/interop/grpc_testing/test.pb.go b/interop/grpc_testing/test.pb.go index 50db0950b9c5..b1921f8565de 100644 --- a/interop/grpc_testing/test.pb.go +++ b/interop/grpc_testing/test.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/testing/test.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - var File_grpc_testing_test_proto protoreflect.FileDescriptor var file_grpc_testing_test_proto_rawDesc = []byte{ @@ -142,7 +137,9 @@ var file_grpc_testing_test_proto_rawDesc = []byte{ 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x73, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_grpc_testing_test_proto_goTypes = []interface{}{ diff --git a/interop/grpc_testing/test_grpc.pb.go b/interop/grpc_testing/test_grpc.pb.go index ad5310aed623..fcf87509acff 100644 --- a/interop/grpc_testing/test_grpc.pb.go +++ b/interop/grpc_testing/test_grpc.pb.go @@ -1,4 +1,25 @@ +// Copyright 2015-2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/testing/test.proto package grpc_testing @@ -14,6 +35,17 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + TestService_EmptyCall_FullMethodName = "/grpc.testing.TestService/EmptyCall" + TestService_UnaryCall_FullMethodName = "/grpc.testing.TestService/UnaryCall" + TestService_CacheableUnaryCall_FullMethodName = "/grpc.testing.TestService/CacheableUnaryCall" + TestService_StreamingOutputCall_FullMethodName = "/grpc.testing.TestService/StreamingOutputCall" + TestService_StreamingInputCall_FullMethodName = "/grpc.testing.TestService/StreamingInputCall" + TestService_FullDuplexCall_FullMethodName = "/grpc.testing.TestService/FullDuplexCall" + TestService_HalfDuplexCall_FullMethodName = "/grpc.testing.TestService/HalfDuplexCall" + TestService_UnimplementedCall_FullMethodName = "/grpc.testing.TestService/UnimplementedCall" +) + // TestServiceClient is the client API for TestService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -56,7 +88,7 @@ func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, opts...) + err := c.cc.Invoke(ctx, TestService_EmptyCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -65,7 +97,7 @@ func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...gr func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, opts...) 
+ err := c.cc.Invoke(ctx, TestService_UnaryCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -74,7 +106,7 @@ func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, op func (c *testServiceClient) CacheableUnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { out := new(SimpleResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/CacheableUnaryCall", in, out, opts...) + err := c.cc.Invoke(ctx, TestService_CacheableUnaryCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -82,7 +114,7 @@ func (c *testServiceClient) CacheableUnaryCall(ctx context.Context, in *SimpleRe } func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[0], "/grpc.testing.TestService/StreamingOutputCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[0], TestService_StreamingOutputCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -114,7 +146,7 @@ func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallRespo } func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[1], "/grpc.testing.TestService/StreamingInputCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[1], TestService_StreamingInputCall_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -148,7 +180,7 @@ func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCal } func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[2], "/grpc.testing.TestService/FullDuplexCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[2], TestService_FullDuplexCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -179,7 +211,7 @@ func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, } func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[3], "/grpc.testing.TestService/HalfDuplexCall", opts...) + stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[3], TestService_HalfDuplexCall_FullMethodName, opts...) if err != nil { return nil, err } @@ -211,7 +243,7 @@ func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, func (c *testServiceClient) UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/UnimplementedCall", in, out, opts...) + err := c.cc.Invoke(ctx, TestService_UnimplementedCall_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -302,7 +334,7 @@ func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.TestService/EmptyCall", + FullMethod: TestService_EmptyCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) @@ -320,7 +352,7 @@ func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.TestService/UnaryCall", + FullMethod: TestService_UnaryCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) @@ -338,7 +370,7 @@ func _TestService_CacheableUnaryCall_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.TestService/CacheableUnaryCall", + FullMethod: TestService_CacheableUnaryCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).CacheableUnaryCall(ctx, req.(*SimpleRequest)) @@ -455,7 +487,7 @@ func _TestService_UnimplementedCall_Handler(srv interface{}, ctx context.Context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.TestService/UnimplementedCall", + FullMethod: TestService_UnimplementedCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(TestServiceServer).UnimplementedCall(ctx, req.(*Empty)) @@ -514,6 +546,10 @@ var TestService_ServiceDesc = grpc.ServiceDesc{ Metadata: "grpc/testing/test.proto", } +const ( + UnimplementedService_UnimplementedCall_FullMethodName = "/grpc.testing.UnimplementedService/UnimplementedCall" +) + // UnimplementedServiceClient is the client API for UnimplementedService service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -532,7 +568,7 @@ func NewUnimplementedServiceClient(cc grpc.ClientConnInterface) UnimplementedSer func (c *unimplementedServiceClient) UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.UnimplementedService/UnimplementedCall", in, out, opts...) + err := c.cc.Invoke(ctx, UnimplementedService_UnimplementedCall_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -578,7 +614,7 @@ func _UnimplementedService_UnimplementedCall_Handler(srv interface{}, ctx contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.UnimplementedService/UnimplementedCall", + FullMethod: UnimplementedService_UnimplementedCall_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(UnimplementedServiceServer).UnimplementedCall(ctx, req.(*Empty)) @@ -602,6 +638,11 @@ var UnimplementedService_ServiceDesc = grpc.ServiceDesc{ Metadata: "grpc/testing/test.proto", } +const ( + ReconnectService_Start_FullMethodName = "/grpc.testing.ReconnectService/Start" + ReconnectService_Stop_FullMethodName = "/grpc.testing.ReconnectService/Stop" +) + // ReconnectServiceClient is the client API for ReconnectService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -620,7 +661,7 @@ func NewReconnectServiceClient(cc grpc.ClientConnInterface) ReconnectServiceClie func (c *reconnectServiceClient) Start(ctx context.Context, in *ReconnectParams, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.ReconnectService/Start", in, out, opts...) 
+ err := c.cc.Invoke(ctx, ReconnectService_Start_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -629,7 +670,7 @@ func (c *reconnectServiceClient) Start(ctx context.Context, in *ReconnectParams, func (c *reconnectServiceClient) Stop(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReconnectInfo, error) { out := new(ReconnectInfo) - err := c.cc.Invoke(ctx, "/grpc.testing.ReconnectService/Stop", in, out, opts...) + err := c.cc.Invoke(ctx, ReconnectService_Stop_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -678,7 +719,7 @@ func _ReconnectService_Start_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.ReconnectService/Start", + FullMethod: ReconnectService_Start_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReconnectServiceServer).Start(ctx, req.(*ReconnectParams)) @@ -696,7 +737,7 @@ func _ReconnectService_Stop_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.ReconnectService/Stop", + FullMethod: ReconnectService_Stop_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ReconnectServiceServer).Stop(ctx, req.(*Empty)) @@ -724,6 +765,11 @@ var ReconnectService_ServiceDesc = grpc.ServiceDesc{ Metadata: "grpc/testing/test.proto", } +const ( + LoadBalancerStatsService_GetClientStats_FullMethodName = "/grpc.testing.LoadBalancerStatsService/GetClientStats" + LoadBalancerStatsService_GetClientAccumulatedStats_FullMethodName = "/grpc.testing.LoadBalancerStatsService/GetClientAccumulatedStats" +) + // LoadBalancerStatsServiceClient is the client API for LoadBalancerStatsService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -744,7 +790,7 @@ func NewLoadBalancerStatsServiceClient(cc grpc.ClientConnInterface) LoadBalancer func (c *loadBalancerStatsServiceClient) GetClientStats(ctx context.Context, in *LoadBalancerStatsRequest, opts ...grpc.CallOption) (*LoadBalancerStatsResponse, error) { out := new(LoadBalancerStatsResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.LoadBalancerStatsService/GetClientStats", in, out, opts...) + err := c.cc.Invoke(ctx, LoadBalancerStatsService_GetClientStats_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -753,7 +799,7 @@ func (c *loadBalancerStatsServiceClient) GetClientStats(ctx context.Context, in func (c *loadBalancerStatsServiceClient) GetClientAccumulatedStats(ctx context.Context, in *LoadBalancerAccumulatedStatsRequest, opts ...grpc.CallOption) (*LoadBalancerAccumulatedStatsResponse, error) { out := new(LoadBalancerAccumulatedStatsResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.LoadBalancerStatsService/GetClientAccumulatedStats", in, out, opts...) + err := c.cc.Invoke(ctx, LoadBalancerStatsService_GetClientAccumulatedStats_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -805,7 +851,7 @@ func _LoadBalancerStatsService_GetClientStats_Handler(srv interface{}, ctx conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.LoadBalancerStatsService/GetClientStats", + FullMethod: LoadBalancerStatsService_GetClientStats_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LoadBalancerStatsServiceServer).GetClientStats(ctx, req.(*LoadBalancerStatsRequest)) @@ -823,7 +869,7 @@ func _LoadBalancerStatsService_GetClientAccumulatedStats_Handler(srv interface{} } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.LoadBalancerStatsService/GetClientAccumulatedStats", + FullMethod: LoadBalancerStatsService_GetClientAccumulatedStats_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LoadBalancerStatsServiceServer).GetClientAccumulatedStats(ctx, req.(*LoadBalancerAccumulatedStatsRequest)) @@ -851,6 +897,11 @@ var LoadBalancerStatsService_ServiceDesc = grpc.ServiceDesc{ Metadata: "grpc/testing/test.proto", } +const ( + XdsUpdateHealthService_SetServing_FullMethodName = "/grpc.testing.XdsUpdateHealthService/SetServing" + XdsUpdateHealthService_SetNotServing_FullMethodName = "/grpc.testing.XdsUpdateHealthService/SetNotServing" +) + // XdsUpdateHealthServiceClient is the client API for XdsUpdateHealthService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -869,7 +920,7 @@ func NewXdsUpdateHealthServiceClient(cc grpc.ClientConnInterface) XdsUpdateHealt func (c *xdsUpdateHealthServiceClient) SetServing(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.XdsUpdateHealthService/SetServing", in, out, opts...) 
+ err := c.cc.Invoke(ctx, XdsUpdateHealthService_SetServing_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -878,7 +929,7 @@ func (c *xdsUpdateHealthServiceClient) SetServing(ctx context.Context, in *Empty func (c *xdsUpdateHealthServiceClient) SetNotServing(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.XdsUpdateHealthService/SetNotServing", in, out, opts...) + err := c.cc.Invoke(ctx, XdsUpdateHealthService_SetNotServing_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -928,7 +979,7 @@ func _XdsUpdateHealthService_SetServing_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.XdsUpdateHealthService/SetServing", + FullMethod: XdsUpdateHealthService_SetServing_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(XdsUpdateHealthServiceServer).SetServing(ctx, req.(*Empty)) @@ -946,7 +997,7 @@ func _XdsUpdateHealthService_SetNotServing_Handler(srv interface{}, ctx context. } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.XdsUpdateHealthService/SetNotServing", + FullMethod: XdsUpdateHealthService_SetNotServing_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(XdsUpdateHealthServiceServer).SetNotServing(ctx, req.(*Empty)) @@ -974,6 +1025,10 @@ var XdsUpdateHealthService_ServiceDesc = grpc.ServiceDesc{ Metadata: "grpc/testing/test.proto", } +const ( + XdsUpdateClientConfigureService_Configure_FullMethodName = "/grpc.testing.XdsUpdateClientConfigureService/Configure" +) + // XdsUpdateClientConfigureServiceClient is the client API for XdsUpdateClientConfigureService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
@@ -992,7 +1047,7 @@ func NewXdsUpdateClientConfigureServiceClient(cc grpc.ClientConnInterface) XdsUp func (c *xdsUpdateClientConfigureServiceClient) Configure(ctx context.Context, in *ClientConfigureRequest, opts ...grpc.CallOption) (*ClientConfigureResponse, error) { out := new(ClientConfigureResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.XdsUpdateClientConfigureService/Configure", in, out, opts...) + err := c.cc.Invoke(ctx, XdsUpdateClientConfigureService_Configure_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -1039,7 +1094,7 @@ func _XdsUpdateClientConfigureService_Configure_Handler(srv interface{}, ctx con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.XdsUpdateClientConfigureService/Configure", + FullMethod: XdsUpdateClientConfigureService_Configure_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(XdsUpdateClientConfigureServiceServer).Configure(ctx, req.(*ClientConfigureRequest)) diff --git a/interop/grpc_testing/worker_service.pb.go b/interop/grpc_testing/worker_service.pb.go index 3effdd6533b4..40b94e1dd231 100644 --- a/interop/grpc_testing/worker_service.pb.go +++ b/interop/grpc_testing/worker_service.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: grpc/testing/worker_service.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - var File_grpc_testing_worker_service_proto protoreflect.FileDescriptor var file_grpc_testing_worker_service_proto_rawDesc = []byte{ @@ -66,7 +61,10 @@ var file_grpc_testing_worker_service_proto_rawDesc = []byte{ 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x51, 0x75, 0x69, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x12, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x1a, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x67, 0x2e, 0x56, 0x6f, 0x69, 0x64, 0x42, 0x27, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x42, 0x12, 0x57, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_grpc_testing_worker_service_proto_goTypes = []interface{}{ diff --git a/interop/grpc_testing/worker_service_grpc.pb.go b/interop/grpc_testing/worker_service_grpc.pb.go index cc49b22b9261..1de7f09f841a 100644 --- a/interop/grpc_testing/worker_service_grpc.pb.go +++ b/interop/grpc_testing/worker_service_grpc.pb.go @@ -1,4 +1,25 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// An integration test service that covers all the method signature permutations +// of unary/streaming requests/responses. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/testing/worker_service.proto package grpc_testing @@ -14,6 +35,13 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + WorkerService_RunServer_FullMethodName = "/grpc.testing.WorkerService/RunServer" + WorkerService_RunClient_FullMethodName = "/grpc.testing.WorkerService/RunClient" + WorkerService_CoreCount_FullMethodName = "/grpc.testing.WorkerService/CoreCount" + WorkerService_QuitWorker_FullMethodName = "/grpc.testing.WorkerService/QuitWorker" +) + // WorkerServiceClient is the client API for WorkerService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -47,7 +75,7 @@ func NewWorkerServiceClient(cc grpc.ClientConnInterface) WorkerServiceClient { } func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) { - stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[0], "/grpc.testing.WorkerService/RunServer", opts...) + stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[0], WorkerService_RunServer_FullMethodName, opts...) if err != nil { return nil, err } @@ -78,7 +106,7 @@ func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) { } func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) { - stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[1], "/grpc.testing.WorkerService/RunClient", opts...) + stream, err := c.cc.NewStream(ctx, &WorkerService_ServiceDesc.Streams[1], WorkerService_RunClient_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -110,7 +138,7 @@ func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) { func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) { out := new(CoreResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, opts...) + err := c.cc.Invoke(ctx, WorkerService_CoreCount_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -119,7 +147,7 @@ func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, op func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) { out := new(Void) - err := c.cc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, opts...) + err := c.cc.Invoke(ctx, WorkerService_QuitWorker_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -242,7 +270,7 @@ func _WorkerService_CoreCount_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.WorkerService/CoreCount", + FullMethod: WorkerService_CoreCount_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServiceServer).CoreCount(ctx, req.(*CoreRequest)) @@ -260,7 +288,7 @@ func _WorkerService_QuitWorker_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.WorkerService/QuitWorker", + FullMethod: WorkerService_QuitWorker_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(WorkerServiceServer).QuitWorker(ctx, req.(*Void)) diff --git a/interop/grpclb_fallback/client.go b/interop/grpclb_fallback/client_linux.go similarity index 66% rename from interop/grpclb_fallback/client.go rename to interop/grpclb_fallback/client_linux.go index 61b2fae6968e..b1cfde71134e 100644 --- 
a/interop/grpclb_fallback/client.go +++ b/interop/grpclb_fallback/client_linux.go @@ -1,5 +1,3 @@ -// +build linux,!appengine - /* * * Copyright 2019 gRPC authors. @@ -37,22 +35,21 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/credentials/google" + _ "google.golang.org/grpc/xds/googledirectpath" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" ) var ( - customCredentialsType = flag.String("custom_credentials_type", "", "Client creds to use") - serverURI = flag.String("server_uri", "dns:///staging-grpc-directpath-fallback-test.googleapis.com:443", "The server host name") - unrouteLBAndBackendAddrsCmd = flag.String("unroute_lb_and_backend_addrs_cmd", "", "Command to make LB and backend address unroutable") - blackholeLBAndBackendAddrsCmd = flag.String("blackhole_lb_and_backend_addrs_cmd", "", "Command to make LB and backend addresses blackholed") - testCase = flag.String("test_case", "", + customCredentialsType = flag.String("custom_credentials_type", "", "Client creds to use") + serverURI = flag.String("server_uri", "dns:///staging-grpc-directpath-fallback-test.googleapis.com:443", "The server host name") + induceFallbackCmd = flag.String("induce_fallback_cmd", "", "Command to induce fallback e.g. by making certain addresses unroutable") + fallbackDeadlineSeconds = flag.Int("fallback_deadline_seconds", 1, "How long to wait for fallback to happen after induce_fallback_cmd") + testCase = flag.String("test_case", "", `Configure different test cases. 
Valid options are: - fast_fallback_before_startup : LB/backend connections fail fast before RPC's have been made; - fast_fallback_after_startup : LB/backend connections fail fast after RPC's have been made; - slow_fallback_before_startup : LB/backend connections black hole before RPC's have been made; - slow_fallback_after_startup : LB/backend connections black hole after RPC's have been made;`) + fallback_before_startup : LB/backend connections fail before RPC's have been made; + fallback_after_startup : LB/backend connections fail after RPC's have been made;`) infoLog = log.New(os.Stderr, "INFO: ", log.Ldate|log.Ltime|log.Lshortfile) errorLog = log.New(os.Stderr, "ERROR: ", log.Ldate|log.Ltime|log.Lshortfile) ) @@ -100,7 +97,6 @@ func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { func createTestConn() *grpc.ClientConn { opts := []grpc.DialOption{ grpc.WithContextDialer(dialTCPUserTimeout), - grpc.WithBlock(), } switch *customCredentialsType { case "tls": @@ -134,7 +130,7 @@ func waitForFallbackAndDoRPCs(client testgrpc.TestServiceClient, fallbackDeadlin fallbackRetryCount := 0 fellBack := false for time.Now().Before(fallbackDeadline) { - g := doRPCAndGetPath(client, 1*time.Second) + g := doRPCAndGetPath(client, 20*time.Second) if g == testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_FALLBACK { infoLog.Println("Made one successul RPC to a fallback. 
Now expect the same for the rest.") fellBack = true @@ -157,69 +153,39 @@ func waitForFallbackAndDoRPCs(client testgrpc.TestServiceClient, fallbackDeadlin } } -func doFastFallbackBeforeStartup() { - runCmd(*unrouteLBAndBackendAddrsCmd) - fallbackDeadline := time.Now().Add(5 * time.Second) +func doFallbackBeforeStartup() { + runCmd(*induceFallbackCmd) + fallbackDeadline := time.Now().Add(time.Duration(*fallbackDeadlineSeconds) * time.Second) conn := createTestConn() defer conn.Close() client := testgrpc.NewTestServiceClient(conn) waitForFallbackAndDoRPCs(client, fallbackDeadline) } -func doSlowFallbackBeforeStartup() { - runCmd(*blackholeLBAndBackendAddrsCmd) - fallbackDeadline := time.Now().Add(20 * time.Second) - conn := createTestConn() - defer conn.Close() - client := testgrpc.NewTestServiceClient(conn) - waitForFallbackAndDoRPCs(client, fallbackDeadline) -} - -func doFastFallbackAfterStartup() { +func doFallbackAfterStartup() { conn := createTestConn() defer conn.Close() client := testgrpc.NewTestServiceClient(conn) if g := doRPCAndGetPath(client, 20*time.Second); g != testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND { errorLog.Fatalf("Expected RPC to take grpclb route type BACKEND. Got: %v", g) } - runCmd(*unrouteLBAndBackendAddrsCmd) - fallbackDeadline := time.Now().Add(40 * time.Second) - waitForFallbackAndDoRPCs(client, fallbackDeadline) -} - -func doSlowFallbackAfterStartup() { - conn := createTestConn() - defer conn.Close() - client := testgrpc.NewTestServiceClient(conn) - if g := doRPCAndGetPath(client, 20*time.Second); g != testpb.GrpclbRouteType_GRPCLB_ROUTE_TYPE_BACKEND { - errorLog.Fatalf("Expected RPC to take grpclb route type BACKEND. 
Got: %v", g) - } - runCmd(*blackholeLBAndBackendAddrsCmd) - fallbackDeadline := time.Now().Add(40 * time.Second) + runCmd(*induceFallbackCmd) + fallbackDeadline := time.Now().Add(time.Duration(*fallbackDeadlineSeconds) * time.Second) waitForFallbackAndDoRPCs(client, fallbackDeadline) } func main() { flag.Parse() - if len(*unrouteLBAndBackendAddrsCmd) == 0 { - errorLog.Fatalf("--unroute_lb_and_backend_addrs_cmd unset") - } - if len(*blackholeLBAndBackendAddrsCmd) == 0 { - errorLog.Fatalf("--blackhole_lb_and_backend_addrs_cmd unset") + if len(*induceFallbackCmd) == 0 { + errorLog.Fatalf("--induce_fallback_cmd unset") } switch *testCase { - case "fast_fallback_before_startup": - doFastFallbackBeforeStartup() - log.Printf("FastFallbackBeforeStartup done!\n") - case "fast_fallback_after_startup": - doFastFallbackAfterStartup() - log.Printf("FastFallbackAfterStartup done!\n") - case "slow_fallback_before_startup": - doSlowFallbackBeforeStartup() - log.Printf("SlowFallbackBeforeStartup done!\n") - case "slow_fallback_after_startup": - doSlowFallbackAfterStartup() - log.Printf("SlowFallbackAfterStartup done!\n") + case "fallback_before_startup": + doFallbackBeforeStartup() + log.Printf("FallbackBeforeStartup done!\n") + case "fallback_after_startup": + doFallbackAfterStartup() + log.Printf("FallbackAfterStartup done!\n") default: errorLog.Fatalf("Unsupported test case: %v", *testCase) } diff --git a/interop/http2/negative_http2_client.go b/interop/http2/negative_http2_client.go index 9ed34f75716d..b8c1d522009e 100644 --- a/interop/http2/negative_http2_client.go +++ b/interop/http2/negative_http2_client.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" "google.golang.org/grpc/status" @@ -80,7 +81,7 @@ func rstAfterHeader(tc testgrpc.TestServiceClient) { req := largeSimpleRequest() reply, err := 
tc.UnaryCall(context.Background(), req) if reply != nil { - logger.Fatalf("Client received reply despite server sending rst stream after header") + logger.Fatal("Client received reply despite server sending rst stream after header") } if status.Code(err) != codes.Internal { logger.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Internal) @@ -91,7 +92,7 @@ func rstDuringData(tc testgrpc.TestServiceClient) { req := largeSimpleRequest() reply, err := tc.UnaryCall(context.Background(), req) if reply != nil { - logger.Fatalf("Client received reply despite server sending rst stream during data") + logger.Fatal("Client received reply despite server sending rst stream during data") } if status.Code(err) != codes.Unknown { logger.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Unknown) @@ -102,7 +103,7 @@ func rstAfterData(tc testgrpc.TestServiceClient) { req := largeSimpleRequest() reply, err := tc.UnaryCall(context.Background(), req) if reply != nil { - logger.Fatalf("Client received reply despite server sending rst stream after data") + logger.Fatal("Client received reply despite server sending rst stream after data") } if status.Code(err) != codes.Internal { logger.Fatalf("%v.UnaryCall() = _, %v, want _, %v", tc, status.Code(err), codes.Internal) @@ -131,7 +132,7 @@ func main() { flag.Parse() serverAddr := net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) var opts []grpc.DialOption - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) conn, err := grpc.Dial(serverAddr, opts...) 
if err != nil { logger.Fatalf("Fail to dial: %v", err) diff --git a/interop/interop_test.sh b/interop/interop_test.sh index 5aeaa2aa10a8..7fc290a12c6b 100755 --- a/interop/interop_test.sh +++ b/interop/interop_test.sh @@ -36,13 +36,33 @@ clean () { } fail () { - echo "$(tput setaf 1) $1 $(tput sgr 0)" + echo "$(tput setaf 1) $(date): $1 $(tput sgr 0)" clean exit 1 } pass () { - echo "$(tput setaf 2) $1 $(tput sgr 0)" + echo "$(tput setaf 2) $(date): $1 $(tput sgr 0)" +} + +withTimeout () { + timer=$1 + shift + + # Run command in the background. + cmd=$(printf '%q ' "$@") + eval "$cmd" & + wpid=$! + # Kill after $timer seconds. + sleep $timer && kill $wpid & + kpid=$! + # Wait for the background thread. + wait $wpid + res=$? + # Kill the killer pid in case it's still running. + kill $kpid || true + wait $kpid || true + return $res } # Don't run some tests that need a special environment: @@ -70,24 +90,40 @@ CASES=( "custom_metadata" "unimplemented_method" "unimplemented_service" + "orca_per_rpc" + "orca_oob" ) # Build server +echo "$(tput setaf 4) $(date): building server $(tput sgr 0)" if ! go build -o /dev/null ./interop/server; then fail "failed to build server" else pass "successfully built server" fi +# Build client +echo "$(tput setaf 4) $(date): building client $(tput sgr 0)" +if ! go build -o /dev/null ./interop/client; then + fail "failed to build client" +else + pass "successfully built client" +fi + # Start server SERVER_LOG="$(mktemp)" -go run ./interop/server --use_tls &> $SERVER_LOG & +GRPC_GO_LOG_SEVERITY_LEVEL=info go run ./interop/server --use_tls &> $SERVER_LOG & for case in ${CASES[@]}; do - echo "$(tput setaf 4) testing: ${case} $(tput sgr 0)" + echo "$(tput setaf 4) $(date): testing: ${case} $(tput sgr 0)" CLIENT_LOG="$(mktemp)" - if ! timeout 20 go run ./interop/client --use_tls --server_host_override=foo.test.google.fr --use_test_ca --test_case="${case}" &> $CLIENT_LOG; then + if ! 
GRPC_GO_LOG_SEVERITY_LEVEL=info withTimeout 20 go run ./interop/client \ + --use_tls \ + --server_host_override=foo.test.google.fr \ + --use_test_ca --test_case="${case}" \ + --service_config_json='{ "loadBalancingConfig": [{ "test_backend_metrics_load_balancer": {} }]}' \ + &> $CLIENT_LOG; then fail "FAIL: test case ${case} got server log: $(cat $SERVER_LOG) @@ -95,7 +131,7 @@ for case in ${CASES[@]}; do $(cat $CLIENT_LOG) " else - pass "PASS: test case ${case}" + pass "PASS: test case ${case}" fi done diff --git a/interop/observability/Dockerfile b/interop/observability/Dockerfile new file mode 100644 index 000000000000..7fcfc6df7865 --- /dev/null +++ b/interop/observability/Dockerfile @@ -0,0 +1,53 @@ +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# +# Stage 1: Build the interop test client and server +# + +FROM golang:1.17.13-bullseye as build + +WORKDIR /grpc-go +COPY . . + +WORKDIR /grpc-go/interop/observability +RUN go build -o server/ server/server.go && \ + go build -o client/ client/client.go + + + +# +# Stage 2: +# +# - Copy only the necessary files to reduce Docker image size. +# - Have an ENTRYPOINT script which will launch the interop test client or server +# with the given parameters. +# + +FROM golang:1.17.13-bullseye + +ENV GRPC_GO_LOG_SEVERITY_LEVEL info +ENV GRPC_GO_LOG_VERBOSITY_LEVEL 2 + +WORKDIR /grpc-go/interop/observability/server +COPY --from=build /grpc-go/interop/observability/server/server . 
+ +WORKDIR /grpc-go/interop/observability/client +COPY --from=build /grpc-go/interop/observability/client/client . + +WORKDIR /grpc-go/interop/observability +COPY --from=build /grpc-go/interop/observability/run.sh . + +ENTRYPOINT ["/grpc-go/interop/observability/run.sh"] diff --git a/interop/observability/build_docker.sh b/interop/observability/build_docker.sh new file mode 100755 index 000000000000..ed7a1811e923 --- /dev/null +++ b/interop/observability/build_docker.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex +cd "$(dirname "$0")"/../.. + +# Environment Variables: +# +# TAG_NAME: the docker image tag name +# + +echo Building ${TAG_NAME} + +docker build --no-cache -t ${TAG_NAME} -f ./interop/observability/Dockerfile . diff --git a/interop/observability/client/client.go b/interop/observability/client/client.go new file mode 100644 index 000000000000..d8cf72fa76c9 --- /dev/null +++ b/interop/observability/client/client.go @@ -0,0 +1,78 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "context" + "flag" + "log" + "net" + "strconv" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/gcp/observability" + "google.golang.org/grpc/interop" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" +) + +var ( + serverHost = flag.String("server_host", "localhost", "The server host name") + serverPort = flag.Int("server_port", 10000, "The server port number") + testCase = flag.String("test_case", "large_unary", "The action to perform") + numTimes = flag.Int("num_times", 1, "Number of times to run the test case") +) + +func main() { + err := observability.Start(context.Background()) + if err != nil { + log.Fatalf("observability start failed: %v", err) + } + defer observability.End() + flag.Parse() + serverAddr := *serverHost + if *serverPort != 0 { + serverAddr = net.JoinHostPort(*serverHost, strconv.Itoa(*serverPort)) + } + conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + log.Fatalf("Fail to dial: %v", err) + } + defer conn.Close() + tc := testgrpc.NewTestServiceClient(conn) + for i := 0; i < *numTimes; i++ { + if *testCase == "ping_pong" { + interop.DoPingPong(tc) + } else if *testCase == "large_unary" { + interop.DoLargeUnaryCall(tc) + } else if *testCase == "custom_metadata" { + interop.DoCustomMetadata(tc) + } else { + log.Fatalf("Invalid test case: %s", *testCase) + } + } + // TODO(stanleycheung): remove this once the observability exporter plugin is able to + // 
gracefully flush observability data to cloud at shutdown + // TODO(stanleycheung): see if we can reduce the number 65 + const exporterSleepDuration = 65 * time.Second + log.Printf("Sleeping %v before closing...", exporterSleepDuration) + time.Sleep(exporterSleepDuration) +} diff --git a/interop/observability/go.mod b/interop/observability/go.mod new file mode 100644 index 000000000000..99213d553185 --- /dev/null +++ b/interop/observability/go.mod @@ -0,0 +1,50 @@ +module google.golang.org/grpc/interop/observability + +go 1.17 + +require ( + google.golang.org/grpc v1.54.0 + google.golang.org/grpc/gcp/observability v0.0.0-20230214181353-f4feddb37523 +) + +require ( + cloud.google.com/go v0.110.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/logging v1.7.0 // indirect + cloud.google.com/go/longrunning v0.4.1 // indirect + cloud.google.com/go/monitoring v1.13.0 // indirect + cloud.google.com/go/trace v1.9.0 // indirect + contrib.go.opencensus.io/exporter/stackdriver v0.13.12 // indirect + github.com/aws/aws-sdk-go v1.44.162 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/prometheus/prometheus v2.5.0+incompatible // indirect + go.opencensus.io 
v0.24.0 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/api v0.114.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/grpc/stats/opencensus v1.0.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect +) + +replace google.golang.org/grpc => ../.. + +replace google.golang.org/grpc/gcp/observability => ../../gcp/observability + +replace google.golang.org/grpc/stats/opencensus => ../../stats/opencensus diff --git a/interop/observability/go.sum b/interop/observability/go.sum new file mode 100644 index 000000000000..823435f2f3fd --- /dev/null +++ b/interop/observability/go.sum @@ -0,0 +1,1503 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= 
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod 
h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod 
h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod 
h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= 
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery 
v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= 
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= 
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= 
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod 
h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= 
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow 
v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod 
h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= 
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod 
h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod 
h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod 
h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0 h1:2qsrgXGVoRXpP7otZ14eE1I568zAa92sJSDPyOJvwjM= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod 
h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod 
h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter 
v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= 
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager 
v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.6.0/go.mod 
h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= 
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod 
h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= 
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0 h1:olxC0QHC59zgJVALtgqfD9tGk0lfeCP5/AGXL3Px/no= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= 
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= 
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12 h1:bjBKzIf7/TAkxd7L2utGaLM78bmUWlCval5K9UeElbY= 
+contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go 
v1.44.162 h1:hKAd+X+/BLxVMzH+4zKxbQcQQGrk2UhFX0OTu1Mhon8= +github.com/aws/aws-sdk-go v1.44.162/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod 
h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod 
h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= 
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 
h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod 
h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod 
h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= 
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod 
h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi 
v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= +github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= 
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint 
v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod 
h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod 
h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api 
v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod 
h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine 
v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= 
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto 
v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= 
+google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod 
h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto 
v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod 
h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod 
h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/interop/observability/run.sh b/interop/observability/run.sh new file mode 100755 index 000000000000..b0494668bf84 --- /dev/null +++ b/interop/observability/run.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex +cd "$(dirname "$0")"/../.. + +if [ "$1" = "server" ] ; then + /grpc-go/interop/observability/server/server "${@:2}" + +elif [ "$1" = "client" ] ; then + /grpc-go/interop/observability/client/client "${@:2}" + +else + echo "Invalid action: $1. Usage:" + echo " $ .../run.sh [server|client] --server_host= --server_port= ..." + exit 1 +fi diff --git a/interop/observability/server/server.go b/interop/observability/server/server.go new file mode 100644 index 000000000000..7efab04bba3f --- /dev/null +++ b/interop/observability/server/server.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package main + +import ( + "context" + "flag" + "fmt" + "log" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/gcp/observability" + "google.golang.org/grpc/interop" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" +) + +var ( + port = flag.Int("port", 10000, "The server port") +) + +func main() { + err := observability.Start(context.Background()) + if err != nil { + log.Fatalf("observability start failed: %v", err) + } + defer observability.End() + flag.Parse() + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + server := grpc.NewServer() + defer server.Stop() + testgrpc.RegisterTestServiceServer(server, interop.NewTestServer()) + log.Printf("Observability interop server listening on %v", lis.Addr()) + server.Serve(lis) +} diff --git a/interop/orcalb.go b/interop/orcalb.go new file mode 100644 index 000000000000..28ea7524d7b7 --- /dev/null +++ b/interop/orcalb.go @@ -0,0 +1,170 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package interop + +import ( + "context" + "fmt" + "sync" + "time" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/orca" +) + +func init() { + balancer.Register(orcabb{}) +} + +type orcabb struct{} + +func (orcabb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &orcab{cc: cc} +} + +func (orcabb) Name() string { + return "test_backend_metrics_load_balancer" +} + +type orcab struct { + cc balancer.ClientConn + sc balancer.SubConn + cancelWatch func() + + reportMu sync.Mutex + report *v3orcapb.OrcaLoadReport +} + +func (o *orcab) UpdateClientConnState(s balancer.ClientConnState) error { + if o.sc != nil { + o.sc.UpdateAddresses(s.ResolverState.Addresses) + return nil + } + + if len(s.ResolverState.Addresses) == 0 { + o.ResolverError(fmt.Errorf("produced no addresses")) + return fmt.Errorf("resolver produced no addresses") + } + var err error + o.sc, err = o.cc.NewSubConn(s.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(fmt.Errorf("error creating subconn: %v", err))}) + return nil + } + o.cancelWatch = orca.RegisterOOBListener(o.sc, o, orca.OOBListenerOptions{ReportInterval: time.Second}) + o.sc.Connect() + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)}) + return nil +} + +func (o *orcab) ResolverError(err error) { + if o.sc == nil { + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(fmt.Errorf("resolver error: %v", err))}) + } +} + +func (o *orcab) UpdateSubConnState(sc 
balancer.SubConn, scState balancer.SubConnState) { + if o.sc != sc { + logger.Errorf("received subconn update for unknown subconn: %v vs %v", o.sc, sc) + return + } + switch scState.ConnectivityState { + case connectivity.Ready: + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: &scPicker{sc: sc, o: o}}) + case connectivity.TransientFailure: + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(fmt.Errorf("all subchannels in transient failure: %v", scState.ConnectionError))}) + case connectivity.Connecting: + // Ignore; picker already set to "connecting". + case connectivity.Idle: + sc.Connect() + o.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)}) + case connectivity.Shutdown: + // Ignore; we are closing but handle that in Close instead. + } +} + +func (o *orcab) Close() { + o.cancelWatch() +} + +func (o *orcab) OnLoadReport(r *v3orcapb.OrcaLoadReport) { + o.reportMu.Lock() + defer o.reportMu.Unlock() + logger.Infof("received OOB load report: %v", r) + o.report = r +} + +type scPicker struct { + sc balancer.SubConn + o *orcab +} + +func (p *scPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + doneCB := func(di balancer.DoneInfo) { + if lr, _ := di.ServerLoad.(*v3orcapb.OrcaLoadReport); lr != nil && + (lr.CpuUtilization != 0 || lr.MemUtilization != 0 || len(lr.Utilization) > 0 || len(lr.RequestCost) > 0) { + // Since all RPCs will respond with a load report due to the + // presence of the DialOption, we need to inspect every field and + // use the out-of-band report instead if all are unset/zero. 
+ setContextCMR(info.Ctx, lr) + } else { + p.o.reportMu.Lock() + defer p.o.reportMu.Unlock() + if lr := p.o.report; lr != nil { + setContextCMR(info.Ctx, lr) + } + } + } + return balancer.PickResult{SubConn: p.sc, Done: doneCB}, nil +} + +func setContextCMR(ctx context.Context, lr *v3orcapb.OrcaLoadReport) { + if r := orcaResultFromContext(ctx); r != nil { + *r = lr + } +} + +type orcaKey string + +var orcaCtxKey = orcaKey("orcaResult") + +// contextWithORCAResult sets a key in ctx with a pointer to an ORCA load +// report that is to be filled in by the "test_backend_metrics_load_balancer" +// LB policy's Picker's Done callback. +// +// If a per-call load report is provided from the server for the call, result +// will be filled with that, otherwise the most recent OOB load report is used. +// If no OOB report has been received, result is not modified. +func contextWithORCAResult(ctx context.Context, result **v3orcapb.OrcaLoadReport) context.Context { + return context.WithValue(ctx, orcaCtxKey, result) +} + +// orcaResultFromContext returns the ORCA load report stored in the context. +// The LB policy uses this to communicate the load report back to the interop +// client application. +func orcaResultFromContext(ctx context.Context) **v3orcapb.OrcaLoadReport { + v := ctx.Value(orcaCtxKey) + if v == nil { + return nil + } + return v.(**v3orcapb.OrcaLoadReport) +} diff --git a/interop/server/server.go b/interop/server/server.go index 16360abe9e7b..67fbc3119963 100644 --- a/interop/server/server.go +++ b/interop/server/server.go @@ -17,18 +17,25 @@ */ // Binary server is an interop server. +// +// See interop test case descriptions [here]. 
+// +// [here]: https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md package main import ( "flag" "net" "strconv" + "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/alts" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" "google.golang.org/grpc/interop" + "google.golang.org/grpc/orca" "google.golang.org/grpc/testdata" testgrpc "google.golang.org/grpc/interop/grpc_testing" @@ -48,14 +55,15 @@ var ( func main() { flag.Parse() if *useTLS && *useALTS { - logger.Fatalf("use_tls and use_alts cannot be both set to true") + logger.Fatal("-use_tls and -use_alts cannot be both set to true") } p := strconv.Itoa(*port) lis, err := net.Listen("tcp", ":"+p) if err != nil { logger.Fatalf("failed to listen: %v", err) } - var opts []grpc.ServerOption + logger.Infof("interop server listening on %v", lis.Addr()) + opts := []grpc.ServerOption{orca.CallMetricsServerOption(nil)} if *useTLS { if *certFile == "" { *certFile = testdata.Path("server1.pem") @@ -65,7 +73,7 @@ func main() { } creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) if err != nil { - logger.Fatalf("Failed to generate credentials %v", err) + logger.Fatalf("Failed to generate credentials: %v", err) } opts = append(opts, grpc.Creds(creds)) } else if *useALTS { @@ -77,6 +85,13 @@ func main() { opts = append(opts, grpc.Creds(altsTC)) } server := grpc.NewServer(opts...) 
- testgrpc.RegisterTestServiceServer(server, interop.NewTestServer()) + metricsRecorder := orca.NewServerMetricsRecorder() + sopts := orca.ServiceOptions{ + MinReportingInterval: time.Second, + ServerMetricsProvider: metricsRecorder, + } + internal.ORCAAllowAnyMinReportingInterval.(func(*orca.ServiceOptions))(&sopts) + orca.Register(server, sopts) + testgrpc.RegisterTestServiceServer(server, interop.NewTestServer(interop.NewTestServerOptions{MetricsRecorder: metricsRecorder})) server.Serve(lis) } diff --git a/interop/test_utils.go b/interop/test_utils.go index cbcbcc4da173..29916876eeb4 100644 --- a/interop/test_utils.go +++ b/interop/test_utils.go @@ -17,25 +17,35 @@ */ // Package interop contains functions used by interop client/server. +// +// See interop test case descriptions [here]. +// +// [here]: https://github.com/grpc/grpc/blob/master/doc/interop-test-descriptions.md package interop import ( + "bytes" "context" "fmt" "io" - "io/ioutil" + "os" "strings" + "sync" "time" "github.com/golang/protobuf/proto" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/grpc" + "google.golang.org/grpc/benchmark/stats" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" ) @@ -275,7 +285,7 @@ func DoComputeEngineCreds(tc testgrpc.TestServiceClient, serviceAccount, oauthSc } func getServiceAccountJSONKey(keyFile string) []byte { - jsonKey, err := ioutil.ReadFile(keyFile) + jsonKey, err := os.ReadFile(keyFile) if err != nil { logger.Fatalf("Failed to read the service account key file: %v", err) } @@ -651,7 +661,8 @@ func DoPickFirstUnary(tc testgrpc.TestServiceClient) { Payload: pl, FillServerId: true, } - ctx, cancel 
:= context.WithTimeout(context.Background(), 10*time.Second) + // TODO(mohanli): Revert the timeout back to 10s once TD migrates to xdstp. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() var serverID string for i := 0; i < rpcCount; i++ { @@ -673,12 +684,119 @@ func DoPickFirstUnary(tc testgrpc.TestServiceClient) { } } +func doOneSoakIteration(ctx context.Context, tc testgrpc.TestServiceClient, resetChannel bool, serverAddr string, dopts []grpc.DialOption, copts []grpc.CallOption) (latency time.Duration, err error) { + start := time.Now() + client := tc + if resetChannel { + var conn *grpc.ClientConn + conn, err = grpc.Dial(serverAddr, dopts...) + if err != nil { + return + } + defer conn.Close() + client = testgrpc.NewTestServiceClient(conn) + } + // per test spec, don't include channel shutdown in latency measurement + defer func() { latency = time.Since(start) }() + // do a large-unary RPC + pl := ClientNewPayload(testpb.PayloadType_COMPRESSABLE, largeReqSize) + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: int32(largeRespSize), + Payload: pl, + } + var reply *testpb.SimpleResponse + reply, err = client.UnaryCall(ctx, req, copts...) + if err != nil { + err = fmt.Errorf("/TestService/UnaryCall RPC failed: %s", err) + return + } + t := reply.GetPayload().GetType() + s := len(reply.GetPayload().GetBody()) + if t != testpb.PayloadType_COMPRESSABLE || s != largeRespSize { + err = fmt.Errorf("got the reply with type %d len %d; want %d, %d", t, s, testpb.PayloadType_COMPRESSABLE, largeRespSize) + return + } + return +} + +// DoSoakTest runs large unary RPCs in a loop for a configurable number of times, with configurable failure thresholds. +// If resetChannel is false, then each RPC will be performed on tc. Otherwise, each RPC will be performed on a new +// stub that is created with the provided server address and dial options. 
+func DoSoakTest(tc testgrpc.TestServiceClient, serverAddr string, dopts []grpc.DialOption, resetChannel bool, soakIterations int, maxFailures int, perIterationMaxAcceptableLatency time.Duration, minTimeBetweenRPCs time.Duration, overallDeadline time.Time) { + start := time.Now() + ctx, cancel := context.WithDeadline(context.Background(), overallDeadline) + defer cancel() + iterationsDone := 0 + totalFailures := 0 + hopts := stats.HistogramOptions{ + NumBuckets: 20, + GrowthFactor: 1, + BaseBucketSize: 1, + MinValue: 0, + } + h := stats.NewHistogram(hopts) + for i := 0; i < soakIterations; i++ { + if time.Now().After(overallDeadline) { + break + } + earliestNextStart := time.After(minTimeBetweenRPCs) + iterationsDone++ + var p peer.Peer + latency, err := doOneSoakIteration(ctx, tc, resetChannel, serverAddr, dopts, []grpc.CallOption{grpc.Peer(&p)}) + latencyMs := int64(latency / time.Millisecond) + h.Add(latencyMs) + if err != nil { + totalFailures++ + addrStr := "nil" + if p.Addr != nil { + addrStr = p.Addr.String() + } + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s server_uri: %s failed: %s\n", i, latencyMs, addrStr, serverAddr, err) + <-earliestNextStart + continue + } + if latency > perIterationMaxAcceptableLatency { + totalFailures++ + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s server_uri: %s exceeds max acceptable latency: %d\n", i, latencyMs, p.Addr.String(), serverAddr, perIterationMaxAcceptableLatency.Milliseconds()) + <-earliestNextStart + continue + } + fmt.Fprintf(os.Stderr, "soak iteration: %d elapsed_ms: %d peer: %s server_uri: %s succeeded\n", i, latencyMs, p.Addr.String(), serverAddr) + <-earliestNextStart + } + var b bytes.Buffer + h.Print(&b) + fmt.Fprintf(os.Stderr, "(server_uri: %s) histogram of per-iteration latencies in milliseconds: %s\n", serverAddr, b.String()) + fmt.Fprintf(os.Stderr, "(server_uri: %s) soak test ran: %d / %d iterations. total failures: %d. max failures threshold: %d. 
See breakdown above for which iterations succeeded, failed, and why for more info.\n", serverAddr, iterationsDone, soakIterations, totalFailures, maxFailures) + if iterationsDone < soakIterations { + logger.Fatalf("(server_uri: %s) soak test consumed all %f seconds of time and quit early, only having ran %d out of desired %d iterations.", serverAddr, overallDeadline.Sub(start).Seconds(), iterationsDone, soakIterations) + } + if totalFailures > maxFailures { + logger.Fatalf("(server_uri: %s) soak test total failures: %d exceeds max failures threshold: %d.", serverAddr, totalFailures, maxFailures) + } +} + type testServer struct { testgrpc.UnimplementedTestServiceServer + + orcaMu sync.Mutex + metricsRecorder orca.ServerMetricsRecorder +} + +// NewTestServerOptions contains options that control the behavior of the test +// server returned by NewTestServer. +type NewTestServerOptions struct { + MetricsRecorder orca.ServerMetricsRecorder } -// NewTestServer creates a test server for test service. -func NewTestServer() testgrpc.TestServiceServer { +// NewTestServer creates a test server for test service. opts carries optional +// settings and does not need to be provided. If multiple opts are provided, +// only the first one is used. +func NewTestServer(opts ...NewTestServerOptions) testgrpc.TestServiceServer { + if len(opts) > 0 { + return &testServer{metricsRecorder: opts[0].MetricsRecorder} + } return &testServer{} } @@ -721,11 +839,29 @@ func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (* if err != nil { return nil, err } + if r, orcaData := orca.CallMetricsRecorderFromContext(ctx), in.GetOrcaPerQueryReport(); r != nil && orcaData != nil { + // Transfer the request's per-Call ORCA data to the call metrics + // recorder in the context, if present. 
+ setORCAMetrics(r, orcaData) + } return &testpb.SimpleResponse{ Payload: pl, }, nil } +func setORCAMetrics(r orca.ServerMetricsRecorder, orcaData *testpb.TestOrcaReport) { + r.SetCPUUtilization(orcaData.CpuUtilization) + r.SetMemoryUtilization(orcaData.MemoryUtilization) + if rq, ok := r.(orca.CallMetricsRecorder); ok { + for k, v := range orcaData.RequestCost { + rq.SetRequestCost(k, v) + } + } + for k, v := range orcaData.Utilization { + r.SetNamedUtilization(k, v) + } +} + func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error { cs := args.GetResponseParameters() for _, c := range cs { @@ -773,6 +909,7 @@ func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallSe stream.SetTrailer(trailer) } } + hasORCALock := false for { in, err := stream.Recv() if err == io.EOF { @@ -786,6 +923,18 @@ func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallSe if st != nil && st.Code != 0 { return status.Error(codes.Code(st.Code), st.Message) } + + if r, orcaData := s.metricsRecorder, in.GetOrcaOobReport(); r != nil && orcaData != nil { + // Transfer the request's OOB ORCA data to the server metrics recorder + // in the server, if present. + if !hasORCALock { + s.orcaMu.Lock() + defer s.orcaMu.Unlock() + hasORCALock = true + } + setORCAMetrics(r, orcaData) + } + cs := in.GetResponseParameters() for _, c := range cs { if us := c.GetIntervalUs(); us > 0 { @@ -836,3 +985,102 @@ func (s *testServer) HalfDuplexCall(stream testgrpc.TestService_HalfDuplexCallSe } return nil } + +// DoORCAPerRPCTest performs a unary RPC that enables ORCA per-call reporting +// and verifies the load report sent back to the LB policy's Done callback. 
+func DoORCAPerRPCTest(tc testgrpc.TestServiceClient) {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+	orcaRes := &v3orcapb.OrcaLoadReport{}
+	_, err := tc.UnaryCall(contextWithORCAResult(ctx, &orcaRes), &testpb.SimpleRequest{
+		OrcaPerQueryReport: &testpb.TestOrcaReport{
+			CpuUtilization:    0.8210,
+			MemoryUtilization: 0.5847,
+			RequestCost:       map[string]float64{"cost": 3456.32},
+			Utilization:       map[string]float64{"util": 0.30499},
+		},
+	})
+	if err != nil {
+		logger.Fatalf("/TestService/UnaryCall RPC failed: %v", err)
+	}
+	want := &v3orcapb.OrcaLoadReport{
+		CpuUtilization: 0.8210,
+		MemUtilization: 0.5847,
+		RequestCost:    map[string]float64{"cost": 3456.32},
+		Utilization:    map[string]float64{"util": 0.30499},
+	}
+	if !proto.Equal(orcaRes, want) {
+		logger.Fatalf("/TestService/UnaryCall RPC received ORCA load report %+v; want %+v", orcaRes, want)
+	}
+}
+
+// DoORCAOOBTest performs a streaming RPC that enables ORCA OOB reporting and
+// verifies the load report sent to the LB policy's OOB listener.
+func DoORCAOOBTest(tc testgrpc.TestServiceClient) {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+	stream, err := tc.FullDuplexCall(ctx)
+	if err != nil {
+		logger.Fatalf("/TestService/FullDuplexCall received error starting stream: %v", err)
+	}
+	err = stream.Send(&testpb.StreamingOutputCallRequest{
+		OrcaOobReport: &testpb.TestOrcaReport{
+			CpuUtilization:    0.8210,
+			MemoryUtilization: 0.5847,
+			Utilization:       map[string]float64{"util": 0.30499},
+		},
+		ResponseParameters: []*testpb.ResponseParameters{{Size: 1}},
+	})
+	if err != nil {
+		logger.Fatalf("/TestService/FullDuplexCall received error sending: %v", err)
+	}
+	_, err = stream.Recv()
+	if err != nil {
+		logger.Fatalf("/TestService/FullDuplexCall received error receiving: %v", err)
+	}
+
+	want := &v3orcapb.OrcaLoadReport{
+		CpuUtilization: 0.8210,
+		MemUtilization: 0.5847,
+		Utilization:    map[string]float64{"util": 0.30499},
+	}
+	checkORCAMetrics(ctx, tc, want)
+
+	err = stream.Send(&testpb.StreamingOutputCallRequest{
+		OrcaOobReport: &testpb.TestOrcaReport{
+			CpuUtilization:    0.29309,
+			MemoryUtilization: 0.2,
+			Utilization:       map[string]float64{"util": 0.2039},
+		},
+		ResponseParameters: []*testpb.ResponseParameters{{Size: 1}},
+	})
+	if err != nil {
+		logger.Fatalf("/TestService/FullDuplexCall received error sending: %v", err)
+	}
+	_, err = stream.Recv()
+	if err != nil {
+		logger.Fatalf("/TestService/FullDuplexCall received error receiving: %v", err)
+	}
+
+	want = &v3orcapb.OrcaLoadReport{
+		CpuUtilization: 0.29309,
+		MemUtilization: 0.2,
+		Utilization:    map[string]float64{"util": 0.2039},
+	}
+	checkORCAMetrics(ctx, tc, want)
+}
+
+func checkORCAMetrics(ctx context.Context, tc testgrpc.TestServiceClient, want *v3orcapb.OrcaLoadReport) {
+	for ctx.Err() == nil {
+		orcaRes := &v3orcapb.OrcaLoadReport{}
+		if _, err := tc.UnaryCall(contextWithORCAResult(ctx, &orcaRes), &testpb.SimpleRequest{}); err != nil {
+			logger.Fatalf("/TestService/UnaryCall RPC failed: %v",
err) + } + if proto.Equal(orcaRes, want) { + return + } + logger.Infof("/TestService/UnaryCall RPC received ORCA load report %+v; want %+v", orcaRes, want) + time.Sleep(time.Second) + } + logger.Fatalf("timed out waiting for expected ORCA load report") +} diff --git a/interop/xds/client/Dockerfile b/interop/xds/client/Dockerfile new file mode 100644 index 000000000000..0d2c44a521af --- /dev/null +++ b/interop/xds/client/Dockerfile @@ -0,0 +1,37 @@ +# Copyright 2021 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Dockerfile for building the xDS interop client. To build the image, run the +# following command from grpc-go directory: +# docker build -t -f interop/xds/client/Dockerfile . + +FROM golang:1.19-alpine as build + +# Make a grpc-go directory and copy the repo into it. +WORKDIR /go/src/grpc-go +COPY . . + +# Build a static binary without cgo so that we can copy just the binary in the +# final image, and can get rid of Go compiler and gRPC-Go dependencies. +RUN go build -tags osusergo,netgo interop/xds/client/client.go + +# Second stage of the build which copies over only the client binary and skips +# the Go compiler and gRPC repo from the earlier stage. This significantly +# reduces the docker image size. +FROM alpine +COPY --from=build /go/src/grpc-go/client . 
+ENV GRPC_GO_LOG_VERBOSITY_LEVEL=2 +ENV GRPC_GO_LOG_SEVERITY_LEVEL="info" +ENV GRPC_GO_LOG_FORMATTER="json" +ENTRYPOINT ["./client"] diff --git a/interop/xds/client/client.go b/interop/xds/client/client.go index 5b755272d3e7..ff03428e1105 100644 --- a/interop/xds/client/client.go +++ b/interop/xds/client/client.go @@ -31,14 +31,19 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/admin" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/reflection" "google.golang.org/grpc/status" _ "google.golang.org/grpc/xds" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" + _ "google.golang.org/grpc/interop/xds" // to register Custom LB. ) func init() { @@ -173,6 +178,7 @@ var ( rpcTimeout = flag.Duration("rpc_timeout", 20*time.Second, "Per RPC timeout") server = flag.String("server", "localhost:8080", "Address of server to connect to") statsPort = flag.Int("stats_port", 8081, "Port to expose peer distribution stats service") + secureMode = flag.Bool("secure_mode", false, "If true, retrieve security configuration from the management server. Else, use insecure credentials.") rpcCfgs atomic.Value @@ -309,12 +315,13 @@ const ( emptyCall string = "EmptyCall" ) -func parseRPCTypes(rpcStr string) (ret []string) { +func parseRPCTypes(rpcStr string) []string { if len(rpcStr) == 0 { return []string{unaryCall} } rpcs := strings.Split(rpcStr, ",") + ret := make([]string, 0, len(rpcStr)) for _, r := range rpcs { switch r { case unaryCall, emptyCall: @@ -324,7 +331,7 @@ func parseRPCTypes(rpcStr string) (ret []string) { log.Fatalf("unsupported RPC type: %v", r) } } - return + return ret } type rpcConfig struct { @@ -334,7 +341,8 @@ type rpcConfig struct { } // parseRPCMetadata turns EmptyCall:key1:value1 into -// {typ: emptyCall, md: {key1:value1}}. 
+// +// {typ: emptyCall, md: {key1:value1}}. func parseRPCMetadata(rpcMetadataStr string, rpcs []string) []*rpcConfig { rpcMetadataSplit := strings.Split(rpcMetadataStr, ",") rpcsToMD := make(map[string][]string) @@ -370,11 +378,26 @@ func main() { defer s.Stop() testgrpc.RegisterLoadBalancerStatsServiceServer(s, &statsService{}) testgrpc.RegisterXdsUpdateClientConfigureServiceServer(s, &configureService{}) + reflection.Register(s) + cleanup, err := admin.Register(s) + if err != nil { + logger.Fatalf("Failed to register admin: %v", err) + } + defer cleanup() go s.Serve(lis) + creds := insecure.NewCredentials() + if *secureMode { + var err error + creds, err = xds.NewClientCredentials(xds.ClientOptions{FallbackCreds: insecure.NewCredentials()}) + if err != nil { + logger.Fatalf("Failed to create xDS credentials: %v", err) + } + } + clients := make([]testgrpc.TestServiceClient, *numChannels) for i := 0; i < *numChannels; i++ { - conn, err := grpc.Dial(*server, grpc.WithInsecure()) + conn, err := grpc.Dial(*server, grpc.WithTransportCredentials(creds)) if err != nil { logger.Fatalf("Fail to dial: %v", err) } diff --git a/interop/xds/custom_lb.go b/interop/xds/custom_lb.go new file mode 100644 index 000000000000..a08d82554008 --- /dev/null +++ b/interop/xds/custom_lb.go @@ -0,0 +1,140 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package xds contains various xds interop helpers for usage in interop tests. +package xds + +import ( + "encoding/json" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + balancer.Register(rpcBehaviorBB{}) +} + +const name = "test.RpcBehaviorLoadBalancer" + +type rpcBehaviorBB struct{} + +func (rpcBehaviorBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &rpcBehaviorLB{ + ClientConn: cc, + } + // round_robin child to complete balancer tree with a usable leaf policy and + // have RPCs actually work. + builder := balancer.Get(roundrobin.Name) + if builder == nil { + // Shouldn't happen, defensive programming. Registered from import of + // roundrobin package. + return nil + } + rr := builder.Build(b, bOpts) + if rr == nil { + // Shouldn't happen, defensive programming. + return nil + } + b.Balancer = rr + return b +} + +func (rpcBehaviorBB) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbCfg := &lbConfig{} + if err := json.Unmarshal(s, lbCfg); err != nil { + return nil, fmt.Errorf("rpc-behavior-lb: unable to marshal lbConfig: %s, error: %v", string(s), err) + } + return lbCfg, nil + +} + +func (rpcBehaviorBB) Name() string { + return name +} + +type lbConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + RPCBehavior string `json:"rpcBehavior,omitempty"` +} + +// rpcBehaviorLB is a load balancer that wraps a round robin balancer and +// appends the rpc-behavior metadata field to any metadata in pick results based +// on what is specified in configuration. 
+type rpcBehaviorLB struct { + // embed a ClientConn to wrap only UpdateState() operation + balancer.ClientConn + // embed a Balancer to wrap only UpdateClientConnState() operation + balancer.Balancer + + mu sync.Mutex + cfg *lbConfig +} + +func (b *rpcBehaviorLB) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*lbConfig) + if !ok { + return fmt.Errorf("test.RpcBehaviorLoadBalancer:received config with unexpected type %T: %s", s.BalancerConfig, pretty.ToJSON(s.BalancerConfig)) + } + b.mu.Lock() + b.cfg = lbCfg + b.mu.Unlock() + return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + }) +} + +func (b *rpcBehaviorLB) UpdateState(state balancer.State) { + b.mu.Lock() + rpcBehavior := b.cfg.RPCBehavior + b.mu.Unlock() + + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: newRPCBehaviorPicker(state.Picker, rpcBehavior), + }) +} + +// rpcBehaviorPicker wraps a picker and adds the rpc-behavior metadata field +// into the child pick result's metadata. +type rpcBehaviorPicker struct { + childPicker balancer.Picker + rpcBehavior string +} + +// Pick appends the rpc-behavior metadata entry to the pick result of the child. +func (p *rpcBehaviorPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + pr, err := p.childPicker.Pick(info) + if err != nil { + return balancer.PickResult{}, err + } + pr.Metadata = metadata.Join(pr.Metadata, metadata.Pairs("rpc-behavior", p.rpcBehavior)) + return pr, nil +} + +func newRPCBehaviorPicker(childPicker balancer.Picker, rpcBehavior string) *rpcBehaviorPicker { + return &rpcBehaviorPicker{ + childPicker: childPicker, + rpcBehavior: rpcBehavior, + } +} diff --git a/interop/xds/custom_lb_test.go b/interop/xds/custom_lb_test.go new file mode 100644 index 000000000000..fc3a7f71c5c9 --- /dev/null +++ b/interop/xds/custom_lb_test.go @@ -0,0 +1,135 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" +) + +var defaultTestTimeout = 5 * time.Second + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestCustomLB tests the Custom LB for the interop client. It configures the +// custom lb as the top level Load Balancing policy of the channel, then asserts +// it can successfully make an RPC and also that the rpc behavior the Custom LB +// is configured with makes it's way to the server in metadata. +func (s) TestCustomLB(t *testing.T) { + errCh := testutils.NewChannel() + // Setup a backend which verifies the expected rpc-behavior metadata is + // present in the request. 
+ backend := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + errCh.Send(errors.New("failed to receive metadata")) + return &testpb.SimpleResponse{}, nil + } + rpcBMD := md.Get("rpc-behavior") + if len(rpcBMD) != 1 { + errCh.Send(errors.New("only one value received for metadata key rpc-behavior")) + return &testpb.SimpleResponse{}, nil + } + wantVal := "error-code-0" + if rpcBMD[0] != wantVal { + errCh.Send(fmt.Errorf("metadata val for key \"rpc-behavior\": got val %v, want val %v", rpcBMD[0], wantVal)) + return &testpb.SimpleResponse{}, nil + } + // Success. + errCh.Send(nil) + return &testpb.SimpleResponse{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started good TestService backend at: %q", backend.Address) + defer backend.Stop() + + lbCfgJSON := `{ + "loadBalancingConfig": [ + { + "test.RpcBehaviorLoadBalancer": { + "rpcBehavior": "error-code-0" + } + } + ] + }` + + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(lbCfgJSON) + mr := manual.NewBuilderWithScheme("customlb-e2e") + defer mr.Close() + mr.InitialState(resolver.State{ + Addresses: []resolver.Address{ + {Addr: backend.Address}, + }, + ServiceConfig: sc, + }) + + cc, err := grpc.Dial(mr.Scheme()+":///", grpc.WithResolvers(mr), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + testServiceClient := testgrpc.NewTestServiceClient(cc) + + // Make a Unary RPC. This RPC should be successful due to the round_robin + // leaf balancer. Also, the custom load balancer should inject the + // "rpc-behavior" string it is configured with into the metadata sent to + // server. 
+ if _, err := testServiceClient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + + val, err := errCh.Receive(ctx) + if err != nil { + t.Fatalf("error receiving from errCh: %v", err) + } + + // Should receive nil on the error channel which implies backend verified it + // correctly received the correct "rpc-behavior" metadata. + if err, ok := val.(error); ok { + t.Fatalf("error in backend verifications on metadata received: %v", err) + } +} diff --git a/interop/xds/server/Dockerfile b/interop/xds/server/Dockerfile new file mode 100644 index 000000000000..db5b2940953d --- /dev/null +++ b/interop/xds/server/Dockerfile @@ -0,0 +1,37 @@ +# Copyright 2021 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Dockerfile for building the xDS interop server. To build the image, run the +# following command from grpc-go directory: +# docker build -t -f interop/xds/server/Dockerfile . + +FROM golang:1.19-alpine as build + +# Make a grpc-go directory and copy the repo into it. +WORKDIR /go/src/grpc-go +COPY . . + +# Build a static binary without cgo so that we can copy just the binary in the +# final image, and can get rid of the Go compiler and gRPC-Go dependencies. +RUN go build -tags osusergo,netgo interop/xds/server/server.go + +# Second stage of the build which copies over only the client binary and skips +# the Go compiler and gRPC repo from the earlier stage. 
This significantly +# reduces the docker image size. +FROM alpine +COPY --from=build /go/src/grpc-go/server . +ENV GRPC_GO_LOG_VERBOSITY_LEVEL=2 +ENV GRPC_GO_LOG_SEVERITY_LEVEL="info" +ENV GRPC_GO_LOG_FORMATTER="json" +ENTRYPOINT ["./server"] diff --git a/interop/xds/server/server.go b/interop/xds/server/server.go index 4989eb728eec..a45a893c392c 100644 --- a/interop/xds/server/server.go +++ b/interop/xds/server/server.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,34 +16,47 @@ * */ -// Binary server for xDS interop tests. +// Binary server is the server used for xDS interop tests. package main import ( "context" "flag" + "fmt" "log" "net" "os" - "strconv" "google.golang.org/grpc" + "google.golang.org/grpc/admin" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/reflection" + "google.golang.org/grpc/xds" + xdscreds "google.golang.org/grpc/credentials/xds" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" + healthpb "google.golang.org/grpc/health/grpc_health_v1" testgrpc "google.golang.org/grpc/interop/grpc_testing" testpb "google.golang.org/grpc/interop/grpc_testing" ) var ( - port = flag.Int("port", 8080, "The server port") - serverID = flag.String("server_id", "go_server", "Server ID included in response") - hostname = getHostname() + port = flag.Int("port", 8080, "Listening port for test service") + maintenancePort = flag.Int("maintenance_port", 8081, "Listening port for maintenance services like health, reflection, channelz etc when -secure_mode is true. 
When -secure_mode is false, all these services will be registered on -port") + serverID = flag.String("server_id", "go_server", "Server ID included in response") + secureMode = flag.Bool("secure_mode", false, "If true, retrieve security configuration from the management server. Else, use insecure credentials.") + hostNameOverride = flag.String("host_name_override", "", "If set, use this as the hostname instead of the real hostname") logger = grpclog.Component("interop") ) func getHostname() string { + if *hostNameOverride != "" { + return *hostNameOverride + } hostname, err := os.Hostname() if err != nil { log.Fatalf("failed to get hostname: %v", err) @@ -51,28 +64,127 @@ func getHostname() string { return hostname } -type server struct { +// testServiceImpl provides an implementation of the TestService defined in +// grpc.testing package. +type testServiceImpl struct { testgrpc.UnimplementedTestServiceServer + hostname string + serverID string } -func (s *server) EmptyCall(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { - grpc.SetHeader(ctx, metadata.Pairs("hostname", hostname)) +func (s *testServiceImpl) EmptyCall(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + grpc.SetHeader(ctx, metadata.Pairs("hostname", s.hostname)) return &testpb.Empty{}, nil } -func (s *server) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - grpc.SetHeader(ctx, metadata.Pairs("hostname", hostname)) - return &testpb.SimpleResponse{ServerId: *serverID, Hostname: hostname}, nil +func (s *testServiceImpl) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + grpc.SetHeader(ctx, metadata.Pairs("hostname", s.hostname)) + return &testpb.SimpleResponse{ServerId: s.serverID, Hostname: s.hostname}, nil +} + +// xdsUpdateHealthServiceImpl provides an implementation of the +// XdsUpdateHealthService defined in grpc.testing package. 
+type xdsUpdateHealthServiceImpl struct { + testgrpc.UnimplementedXdsUpdateHealthServiceServer + healthServer *health.Server +} + +func (x *xdsUpdateHealthServiceImpl) SetServing(_ context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + x.healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + return &testpb.Empty{}, nil + +} + +func (x *xdsUpdateHealthServiceImpl) SetNotServing(_ context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + x.healthServer.SetServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING) + return &testpb.Empty{}, nil +} + +func xdsServingModeCallback(addr net.Addr, args xds.ServingModeChangeArgs) { + logger.Infof("Serving mode callback for xDS server at %q invoked with mode: %q, err: %v", addr.String(), args.Mode, args.Err) } func main() { flag.Parse() - p := strconv.Itoa(*port) - lis, err := net.Listen("tcp", ":"+p) + + if *secureMode && *port == *maintenancePort { + logger.Fatal("-port and -maintenance_port must be different when -secure_mode is set") + } + + testService := &testServiceImpl{hostname: getHostname(), serverID: *serverID} + healthServer := health.NewServer() + updateHealthService := &xdsUpdateHealthServiceImpl{healthServer: healthServer} + + // If -secure_mode is not set, expose all services on -port with a regular + // gRPC server. 
+ if !*secureMode { + addr := fmt.Sprintf(":%d", *port) + lis, err := net.Listen("tcp4", addr) + if err != nil { + logger.Fatalf("net.Listen(%s) failed: %v", addr, err) + } + + server := grpc.NewServer() + testgrpc.RegisterTestServiceServer(server, testService) + healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + healthgrpc.RegisterHealthServer(server, healthServer) + testgrpc.RegisterXdsUpdateHealthServiceServer(server, updateHealthService) + reflection.Register(server) + cleanup, err := admin.Register(server) + if err != nil { + logger.Fatalf("Failed to register admin services: %v", err) + } + defer cleanup() + if err := server.Serve(lis); err != nil { + logger.Errorf("Serve() failed: %v", err) + } + return + } + + // Create a listener on -port to expose the test service. + addr := fmt.Sprintf(":%d", *port) + testLis, err := net.Listen("tcp4", addr) if err != nil { - logger.Fatalf("failed to listen: %v", err) + logger.Fatalf("net.Listen(%s) failed: %v", addr, err) + } + + // Create server-side xDS credentials with a plaintext fallback. + creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{FallbackCreds: insecure.NewCredentials()}) + if err != nil { + logger.Fatalf("Failed to create xDS credentials: %v", err) + } + + // Create an xDS enabled gRPC server, register the test service + // implementation and start serving. + testServer := xds.NewGRPCServer(grpc.Creds(creds), xds.ServingModeCallback(xdsServingModeCallback)) + testgrpc.RegisterTestServiceServer(testServer, testService) + go func() { + if err := testServer.Serve(testLis); err != nil { + logger.Errorf("test server Serve() failed: %v", err) + } + }() + defer testServer.Stop() + + // Create a listener on -maintenance_port to expose other services. 
+ addr = fmt.Sprintf(":%d", *maintenancePort) + maintenanceLis, err := net.Listen("tcp4", addr) + if err != nil { + logger.Fatalf("net.Listen(%s) failed: %v", addr, err) + } + + // Create a regular gRPC server and register the maintenance services on + // it and start serving. + maintenanceServer := grpc.NewServer() + healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + healthgrpc.RegisterHealthServer(maintenanceServer, healthServer) + testgrpc.RegisterXdsUpdateHealthServiceServer(maintenanceServer, updateHealthService) + reflection.Register(maintenanceServer) + cleanup, err := admin.Register(maintenanceServer) + if err != nil { + logger.Fatalf("Failed to register admin services: %v", err) + } + defer cleanup() + if err := maintenanceServer.Serve(maintenanceLis); err != nil { + logger.Errorf("maintenance server Serve() failed: %v", err) } - s := grpc.NewServer() - testgrpc.RegisterTestServiceServer(s, &server{}) - s.Serve(lis) } diff --git a/interop/xds_federation/client.go b/interop/xds_federation/client.go new file mode 100644 index 000000000000..31ec9bba7a36 --- /dev/null +++ b/interop/xds_federation/client.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Binary client is an interop client. 
+package main
+
+import (
+	"flag"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/google"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/interop"
+
+	_ "google.golang.org/grpc/balancer/grpclb"      // Register the grpclb load balancing policy.
+	_ "google.golang.org/grpc/balancer/rls"         // Register the RLS load balancing policy.
+	_ "google.golang.org/grpc/xds/googledirectpath" // Register xDS resolver required for c2p directpath.
+
+	testgrpc "google.golang.org/grpc/interop/grpc_testing"
+)
+
+const (
+	computeEngineCredsName = "compute_engine_channel_creds"
+	insecureCredsName      = "INSECURE_CREDENTIALS"
+)
+
+var (
+	serverURIs = flag.String("server_uris", "", "Comma-separated list of server URIs to make RPCs to")
+	credentialsTypes = flag.String("credentials_types", "", "Comma-separated list of credentials, each entry is used for the server of the corresponding index in server_uris. Supported values: compute_engine_channel_creds, INSECURE_CREDENTIALS")
+	soakIterations = flag.Int("soak_iterations", 10, "The number of iterations to use for the two soak tests: rpc_soak and channel_soak")
+	soakMaxFailures = flag.Int("soak_max_failures", 0, "The number of iterations in soak tests that are allowed to fail (either due to non-OK status code or exceeding the per-iteration max acceptable latency).")
+	soakPerIterationMaxAcceptableLatencyMs = flag.Int("soak_per_iteration_max_acceptable_latency_ms", 1000, "The number of milliseconds a single iteration in the two soak tests (rpc_soak and channel_soak) should take.")
+	soakOverallTimeoutSeconds = flag.Int("soak_overall_timeout_seconds", 10, "The overall number of seconds after which a soak test should stop and fail, if the desired number of iterations have not yet completed.")
+	soakMinTimeMsBetweenRPCs = flag.Int("soak_min_time_ms_between_rpcs", 0, "The minimum time in milliseconds between consecutive RPCs in a soak test 
(rpc_soak or channel_soak), useful for limiting QPS") + testCase = flag.String("test_case", "rpc_soak", + `Configure different test cases. Valid options are: + rpc_soak: sends --soak_iterations large_unary RPCs; + channel_soak: sends --soak_iterations RPCs, rebuilding the channel each time`) + + logger = grpclog.Component("interop") +) + +type clientConfig struct { + tc testgrpc.TestServiceClient + opts []grpc.DialOption + uri string +} + +func main() { + flag.Parse() + // validate flags + uris := strings.Split(*serverURIs, ",") + creds := strings.Split(*credentialsTypes, ",") + if len(uris) != len(creds) { + logger.Fatalf("Number of entries in --server_uris (%d) != number of entries in --credentials_types (%d)", len(uris), len(creds)) + } + for _, c := range creds { + if c != computeEngineCredsName && c != insecureCredsName { + logger.Fatalf("Unsupported credentials type: %v", c) + } + } + var resetChannel bool + switch *testCase { + case "rpc_soak": + resetChannel = false + case "channel_soak": + resetChannel = true + default: + logger.Fatal("Unsupported test case: ", *testCase) + } + + // create clients as specified in flags + var clients []clientConfig + for i := range uris { + var opts []grpc.DialOption + switch creds[i] { + case computeEngineCredsName: + opts = append(opts, grpc.WithCredentialsBundle(google.NewComputeEngineCredentials())) + case insecureCredsName: + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + cc, err := grpc.Dial(uris[i], opts...) 
+ if err != nil { + logger.Fatalf("Fail to dial %v: %v", uris[i], err) + } + defer cc.Close() + clients = append(clients, clientConfig{ + tc: testgrpc.NewTestServiceClient(cc), + opts: opts, + uri: uris[i], + }) + } + + // run soak tests with the different clients + logger.Infof("Clients running with test case %q", *testCase) + var wg sync.WaitGroup + for i := range clients { + wg.Add(1) + go func(c clientConfig) { + interop.DoSoakTest(c.tc, c.uri, c.opts, resetChannel, *soakIterations, *soakMaxFailures, time.Duration(*soakPerIterationMaxAcceptableLatencyMs)*time.Millisecond, time.Duration(*soakMinTimeMsBetweenRPCs)*time.Millisecond, time.Now().Add(time.Duration(*soakOverallTimeoutSeconds)*time.Second)) + logger.Infof("%s test done for server: %s", *testCase, c.uri) + wg.Done() + }(clients[i]) + } + wg.Wait() + logger.Infoln("All clients done!") +} diff --git a/metadata/metadata.go b/metadata/metadata.go index cf6d1b94781c..a2cdcaf12a87 100644 --- a/metadata/metadata.go +++ b/metadata/metadata.go @@ -41,16 +41,17 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may // result in errors if set in metadata. func New(m map[string]string) MD { - md := MD{} + md := make(MD, len(m)) for k, val := range m { key := strings.ToLower(k) md[key] = append(md[key], val) @@ -62,10 +63,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. 
// // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -74,14 +76,10 @@ func Pairs(kv ...string) MD { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) } - md := MD{} - var key string - for i, s := range kv { - if i%2 == 0 { - key = strings.ToLower(s) - continue - } - md[key] = append(md[key], s) + md := make(MD, len(kv)/2) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) } return md } @@ -93,16 +91,24 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - return Join(md) + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out } // Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. func (md MD) Get(k string) []string { k = strings.ToLower(k) return md[k] } // Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. func (md MD) Set(k string, vals ...string) { if len(vals) == 0 { return @@ -111,7 +117,10 @@ func (md MD) Set(k string, vals ...string) { md[k] = vals } -// Append adds the values to key k, not overwriting what was already stored at that key. +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. 
func (md MD) Append(k string, vals ...string) { if len(vals) == 0 { return @@ -120,9 +129,17 @@ func (md MD) Append(k string, vals ...string) { md[k] = append(md[k], vals...) } +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + // Join joins any number of mds into a single MD. -// The order of values for each key is determined by the order in which -// the mds containing those values are presented to Join. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. func Join(mds ...MD) MD { out := MD{} for _, md := range mds { @@ -149,8 +166,8 @@ func NewOutgoingContext(ctx context.Context, md MD) context.Context { } // AppendToOutgoingContext returns a new context with the provided kv merged -// with any existing metadata in the context. Please refer to the -// documentation of Pairs for a description of kv. +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { if len(kv)%2 == 1 { panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) @@ -158,25 +175,76 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) - added[len(added)-1] = make([]string, len(kv)) - copy(added[len(added)-1], kv) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } -// FromIncomingContext returns the incoming metadata in ctx if it exists. 
The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. -func FromIncomingContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdIncomingKey{}).(MD) - return +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := make(MD, len(md)) + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + return out, true +} + +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals } -// FromOutgoingContextRaw returns the un-merged, intermediary contents -// of rawMD. Remember to perform strings.ToLower on the keys. The returned -// MD should not be modified. Writing to it may cause races. 
Modification -// should be made to copies of the returned MD. +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). // -// This is intended for gRPC-internal use ONLY. +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { @@ -186,21 +254,39 @@ func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { return raw.md, raw.added, true } -// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The -// returned MD should not be modified. Writing to it may cause races. -// Modification should be made to copies of the returned MD. +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. func FromOutgoingContext(ctx context.Context) (MD, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, false } - mds := make([]MD, 0, len(raw.added)+1) - mds = append(mds, raw.md) - for _, vv := range raw.added { - mds = append(mds, Pairs(vv...)) + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. 
+ key := strings.ToLower(k) + out[key] = copyOf(v) + } + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } } - return Join(mds...), ok + return out, ok } type rawMD struct { diff --git a/metadata/metadata_test.go b/metadata/metadata_test.go index f1fb5f6d324e..9277f2d6c84f 100644 --- a/metadata/metadata_test.go +++ b/metadata/metadata_test.go @@ -169,6 +169,72 @@ func (s) TestAppend(t *testing.T) { } } +func (s) TestDelete(t *testing.T) { + for _, test := range []struct { + md MD + deleteKey string + want MD + }{ + { + md: Pairs("My-Optional-Header", "42"), + deleteKey: "My-Optional-Header", + want: Pairs(), + }, + { + md: Pairs("My-Optional-Header", "42"), + deleteKey: "Other-Key", + want: Pairs("my-optional-header", "42"), + }, + { + md: Pairs("My-Optional-Header", "42"), + deleteKey: "my-OptIoNal-HeAder", + want: Pairs(), + }, + } { + test.md.Delete(test.deleteKey) + if !reflect.DeepEqual(test.md, test.want) { + t.Errorf("value of metadata is %v, want %v", test.md, test.want) + } + } +} + +func (s) TestValueFromIncomingContext(t *testing.T) { + md := Pairs( + "X-My-Header-1", "42", + "X-My-Header-2", "43-1", + "X-My-Header-2", "43-2", + "x-my-header-3", "44", + ) + ctx := NewIncomingContext(context.Background(), md) + + for _, test := range []struct { + key string + want []string + }{ + { + key: "x-my-header-1", + want: []string{"42"}, + }, + { + key: "x-my-header-2", + want: []string{"43-1", "43-2"}, + }, + { + key: "x-my-header-3", + want: []string{"44"}, + }, + { + key: "x-unknown", + want: nil, + }, + } { + v := ValueFromIncomingContext(ctx, test.key) + if !reflect.DeepEqual(v, test.want) { + t.Errorf("value of metadata is %v, want %v", v, test.want) + } + } +} + func (s) TestAppendToOutgoingContext(t 
*testing.T) { // Pre-existing metadata tCtx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -269,3 +335,21 @@ func BenchmarkFromOutgoingContext(b *testing.B) { FromOutgoingContext(ctx) } } + +func BenchmarkFromIncomingContext(b *testing.B) { + md := Pairs("X-My-Header-1", "42") + ctx := NewIncomingContext(context.Background(), md) + b.ResetTimer() + for n := 0; n < b.N; n++ { + FromIncomingContext(ctx) + } +} + +func BenchmarkValueFromIncomingContext(b *testing.B) { + md := Pairs("X-My-Header-1", "42") + ctx := NewIncomingContext(context.Background(), md) + b.ResetTimer() + for n := 0; n < b.N; n++ { + ValueFromIncomingContext(ctx, "x-my-header-1") + } +} diff --git a/orca/call_metrics.go b/orca/call_metrics.go new file mode 100644 index 000000000000..558c7bce6a8e --- /dev/null +++ b/orca/call_metrics.go @@ -0,0 +1,196 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "context" + "sync" + + "google.golang.org/grpc" + grpcinternal "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca/internal" + "google.golang.org/protobuf/proto" +) + +// CallMetricsRecorder allows a service method handler to record per-RPC +// metrics. It contains all utilization-based metrics from +// ServerMetricsRecorder as well as additional request cost metrics. 
+type CallMetricsRecorder interface { + ServerMetricsRecorder + + // SetRequestCost sets the relevant server metric. + SetRequestCost(name string, val float64) + // DeleteRequestCost deletes the relevant server metric to prevent it + // from being sent. + DeleteRequestCost(name string) + + // SetNamedMetric sets the relevant server metric. + SetNamedMetric(name string, val float64) + // DeleteNamedMetric deletes the relevant server metric to prevent it + // from being sent. + DeleteNamedMetric(name string) +} + +type callMetricsRecorderCtxKey struct{} + +// CallMetricsRecorderFromContext returns the RPC-specific custom metrics +// recorder embedded in the provided RPC context. +// +// Returns nil if no custom metrics recorder is found in the provided context, +// which will be the case when custom metrics reporting is not enabled. +func CallMetricsRecorderFromContext(ctx context.Context) CallMetricsRecorder { + rw, ok := ctx.Value(callMetricsRecorderCtxKey{}).(*recorderWrapper) + if !ok { + return nil + } + return rw.recorder() +} + +// recorderWrapper is a wrapper around a CallMetricsRecorder to ensure that +// concurrent calls to CallMetricsRecorderFromContext() results in only one +// allocation of the underlying metrics recorder, while also allowing for lazy +// initialization of the recorder itself. +type recorderWrapper struct { + once sync.Once + r CallMetricsRecorder + smp ServerMetricsProvider +} + +func (rw *recorderWrapper) recorder() CallMetricsRecorder { + rw.once.Do(func() { + rw.r = newServerMetricsRecorder() + }) + return rw.r +} + +// setTrailerMetadata adds a trailer metadata entry with key being set to +// `internal.TrailerMetadataKey` and value being set to the binary-encoded +// orca.OrcaLoadReport protobuf message. +// +// This function is called from the unary and streaming interceptors defined +// above. Any errors encountered here are not propagated to the caller because +// they are ignored there. 
Hence we simply log any errors encountered here at +// warning level, and return nothing. +func (rw *recorderWrapper) setTrailerMetadata(ctx context.Context) { + var sm *ServerMetrics + if rw.smp != nil { + sm = rw.smp.ServerMetrics() + sm.merge(rw.r.ServerMetrics()) + } else { + sm = rw.r.ServerMetrics() + } + + b, err := proto.Marshal(sm.toLoadReportProto()) + if err != nil { + logger.Warningf("Failed to marshal load report: %v", err) + return + } + if err := grpc.SetTrailer(ctx, metadata.Pairs(internal.TrailerMetadataKey, string(b))); err != nil { + logger.Warningf("Failed to set trailer metadata: %v", err) + } +} + +var joinServerOptions = grpcinternal.JoinServerOptions.(func(...grpc.ServerOption) grpc.ServerOption) + +// CallMetricsServerOption returns a server option which enables the reporting +// of per-RPC custom backend metrics for unary and streaming RPCs. +// +// Server applications interested in injecting custom backend metrics should +// pass the server option returned from this function as the first argument to +// grpc.NewServer(). +// +// Subsequently, server RPC handlers can retrieve a reference to the RPC +// specific custom metrics recorder [CallMetricsRecorder] to be used, via a call +// to CallMetricsRecorderFromContext(), and inject custom metrics at any time +// during the RPC lifecycle. +// +// The injected custom metrics will be sent as part of trailer metadata, as a +// binary-encoded [ORCA LoadReport] protobuf message, with the metadata key +// being set be "endpoint-load-metrics-bin". +// +// If a non-nil ServerMetricsProvider is provided, the gRPC server will +// transmit the metrics it provides, overwritten by any per-RPC metrics given +// to the CallMetricsRecorder. A ServerMetricsProvider is typically obtained +// by calling NewServerMetricsRecorder. 
+// +// [ORCA LoadReport]: https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15 +func CallMetricsServerOption(smp ServerMetricsProvider) grpc.ServerOption { + return joinServerOptions(grpc.ChainUnaryInterceptor(unaryInt(smp)), grpc.ChainStreamInterceptor(streamInt(smp))) +} + +func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricsRecorderFromContext(). + rw := &recorderWrapper{smp: smp} + ctxWithRecorder := newContextWithRecorderWrapper(ctx, rw) + + resp, err := handler(ctxWithRecorder, req) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricsRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r != nil { + rw.setTrailerMetadata(ctx) + } + return resp, err + } +} + +func streamInt(smp ServerMetricsProvider) func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + // We don't allocate the metric recorder here. It will be allocated the + // first time the user calls CallMetricsRecorderFromContext(). 
+ rw := &recorderWrapper{smp: smp} + ws := &wrappedStream{ + ServerStream: ss, + ctx: newContextWithRecorderWrapper(ss.Context(), rw), + } + + err := handler(srv, ws) + + // It is safe to access the underlying metric recorder inside the wrapper at + // this point, as the user's RPC handler is done executing, and therefore + // there will be no more calls to CallMetricsRecorderFromContext(), which is + // where the metric recorder is lazy allocated. + if rw.r != nil { + rw.setTrailerMetadata(ss.Context()) + } + return err + } +} + +func newContextWithRecorderWrapper(ctx context.Context, r *recorderWrapper) context.Context { + return context.WithValue(ctx, callMetricsRecorderCtxKey{}, r) +} + +// wrappedStream wraps the grpc.ServerStream received by the streaming +// interceptor. Overrides only the Context() method to return a context which +// contains a reference to the CallMetricsRecorder corresponding to this +// stream. +type wrappedStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedStream) Context() context.Context { + return w.ctx +} diff --git a/orca/call_metrics_test.go b/orca/call_metrics_test.go new file mode 100644 index 000000000000..b0e6af646c91 --- /dev/null +++ b/orca/call_metrics_test.go @@ -0,0 +1,287 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package orca_test + +import ( + "context" + "errors" + "io" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/orca/internal" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// TestE2ECallMetricsUnary tests the injection of custom backend metrics from +// the server application for a unary RPC, and verifies that expected load +// reports are received at the client. +func (s) TestE2ECallMetricsUnary(t *testing.T) { + tests := []struct { + desc string + injectMetrics bool + wantProto *v3orcapb.OrcaLoadReport + }{ + { + desc: "with custom backend metrics", + injectMetrics: true, + wantProto: &v3orcapb.OrcaLoadReport{ + CpuUtilization: 1.0, + MemUtilization: 0.9, + RequestCost: map[string]float64{"queryCost": 25.0}, + Utilization: map[string]float64{"queueSize": 0.75}, + }, + }, + { + desc: "with no custom backend metrics", + injectMetrics: false, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // A server option to enable reporting of per-call backend metrics. + smr := orca.NewServerMetricsRecorder() + callMetricsServerOption := orca.CallMetricsServerOption(smr) + smr.SetCPUUtilization(1.0) + + // An interceptor to injects custom backend metrics, added only when + // the injectMetrics field in the test is set. 
+ injectingInterceptor := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + recorder := orca.CallMetricsRecorderFromContext(ctx) + if recorder == nil { + err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") + t.Error(err) + return nil, err + } + recorder.SetMemoryUtilization(0.9) + // This value will be overwritten by a write to the same metric + // from the server handler. + recorder.SetNamedUtilization("queueSize", 1.0) + return handler(ctx, req) + } + + // A stub server whose unary handler injects custom metrics, if the + // injectMetrics field in the test is set. It overwrites one of the + // values injected above, by the interceptor. + srv := stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if !test.injectMetrics { + return &testpb.Empty{}, nil + } + recorder := orca.CallMetricsRecorderFromContext(ctx) + if recorder == nil { + err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") + t.Error(err) + return nil, err + } + recorder.SetRequestCost("queryCost", 25.0) + recorder.SetNamedUtilization("queueSize", 0.75) + return &testpb.Empty{}, nil + }, + } + + // Start the stub server with the appropriate server options. + sopts := []grpc.ServerOption{callMetricsServerOption} + if test.injectMetrics { + sopts = append(sopts, grpc.ChainUnaryInterceptor(injectingInterceptor)) + } + if err := srv.StartServer(sopts...); err != nil { + t.Fatalf("Failed to start server: %v", err) + } + defer srv.Stop() + + // Dial the stub server. + cc, err := grpc.Dial(srv.Address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%s) failed: %v", srv.Address, err) + } + defer cc.Close() + + // Make a unary RPC and expect the trailer metadata to contain the custom + // backend metrics as an ORCA LoadReport protobuf message. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + trailer := metadata.MD{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Trailer(&trailer)); err != nil { + t.Fatalf("EmptyCall failed: %v", err) + } + + gotProto, err := internal.ToLoadReport(trailer) + if err != nil { + t.Fatalf("When retrieving load report, got error: %v, want: ", err) + } + if test.wantProto != nil && !cmp.Equal(gotProto, test.wantProto, cmp.Comparer(proto.Equal)) { + t.Fatalf("Received load report in trailer: %s, want: %s", pretty.ToJSON(gotProto), pretty.ToJSON(test.wantProto)) + } + }) + } +} + +// TestE2ECallMetricsStreaming tests the injection of custom backend metrics +// from the server application for a streaming RPC, and verifies that expected +// load reports are received at the client. +func (s) TestE2ECallMetricsStreaming(t *testing.T) { + tests := []struct { + desc string + injectMetrics bool + wantProto *v3orcapb.OrcaLoadReport + }{ + { + desc: "with custom backend metrics", + injectMetrics: true, + wantProto: &v3orcapb.OrcaLoadReport{ + CpuUtilization: 1.0, + MemUtilization: 0.5, + RequestCost: map[string]float64{"queryCost": 0.25}, + Utilization: map[string]float64{"queueSize": 0.75}, + }, + }, + { + desc: "with no custom backend metrics", + injectMetrics: false, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // A server option to enable reporting of per-call backend metrics. + smr := orca.NewServerMetricsRecorder() + callMetricsServerOption := orca.CallMetricsServerOption(smr) + smr.SetCPUUtilization(1.0) + + // An interceptor which injects custom backend metrics, added only + // when the injectMetrics field in the test is set. 
+ injectingInterceptor := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + recorder := orca.CallMetricsRecorderFromContext(ss.Context()) + if recorder == nil { + err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") + t.Error(err) + return err + } + recorder.SetMemoryUtilization(0.5) + // This value will be overwritten by a write to the same metric + // from the server handler. + recorder.SetNamedUtilization("queueSize", 1.0) + return handler(srv, ss) + } + + // A stub server whose streaming handler injects custom metrics, if + // the injectMetrics field in the test is set. It overwrites one of + // the values injected above, by the interceptor. + srv := stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + if test.injectMetrics { + recorder := orca.CallMetricsRecorderFromContext(stream.Context()) + if recorder == nil { + err := errors.New("Failed to retrieve per-RPC custom metrics recorder from the RPC context") + t.Error(err) + return err + } + recorder.SetRequestCost("queryCost", 0.25) + recorder.SetNamedUtilization("queueSize", 0.75) + } + + // Streaming implementation replies with a dummy response until the + // client closes the stream (in which case it will see an io.EOF), + // or an error occurs while reading/writing messages. + for { + _, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + payload := &testpb.Payload{Body: make([]byte, 32)} + if err := stream.Send(&testpb.StreamingOutputCallResponse{Payload: payload}); err != nil { + return err + } + } + }, + } + + // Start the stub server with the appropriate server options. 
+ sopts := []grpc.ServerOption{callMetricsServerOption} + if test.injectMetrics { + sopts = append(sopts, grpc.ChainStreamInterceptor(injectingInterceptor)) + } + if err := srv.StartServer(sopts...); err != nil { + t.Fatalf("Failed to start server: %v", err) + } + defer srv.Stop() + + // Dial the stub server. + cc, err := grpc.Dial(srv.Address, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%s) failed: %v", srv.Address, err) + } + defer cc.Close() + + // Start the full duplex streaming RPC. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + tc := testgrpc.NewTestServiceClient(cc) + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("FullDuplexCall failed: %v", err) + } + + // Send one request to the server. + payload := &testpb.Payload{Body: make([]byte, 32)} + req := &testpb.StreamingOutputCallRequest{Payload: payload} + if err := stream.Send(req); err != nil { + t.Fatalf("stream.Send() failed: %v", err) + } + // Read one reply from the server. + if _, err := stream.Recv(); err != nil { + t.Fatalf("stream.Recv() failed: %v", err) + } + // Close the sending side. + if err := stream.CloseSend(); err != nil { + t.Fatalf("stream.CloseSend() failed: %v", err) + } + // Make sure it is safe to read the trailer. 
+ for { + if _, err := stream.Recv(); err != nil { + break + } + } + + gotProto, err := internal.ToLoadReport(stream.Trailer()) + if err != nil { + t.Fatalf("When retrieving load report, got error: %v, want: ", err) + } + if test.wantProto != nil && !cmp.Equal(gotProto, test.wantProto, cmp.Comparer(proto.Equal)) { + t.Fatalf("Received load report in trailer: %s, want: %s", pretty.ToJSON(gotProto), pretty.ToJSON(test.wantProto)) + } + }) + } +} diff --git a/orca/internal/internal.go b/orca/internal/internal.go new file mode 100644 index 000000000000..35b899d9e877 --- /dev/null +++ b/orca/internal/internal.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains orca-internal code, for testing purposes and to +// avoid polluting the godoc of the top-level orca package. +package internal + +import ( + "errors" + "fmt" + + ibackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// AllowAnyMinReportingInterval prevents clamping of the MinReportingInterval +// configured via ServiceOptions, to a minimum of 30s. +// +// For testing purposes only. +var AllowAnyMinReportingInterval interface{} // func(*ServiceOptions) + +// DefaultBackoffFunc is used by the producer to control its backoff behavior. +// +// For testing purposes only. 
+var DefaultBackoffFunc = ibackoff.DefaultExponential.Backoff + +// TrailerMetadataKey is the key in which the per-call backend metrics are +// transmitted. +const TrailerMetadataKey = "endpoint-load-metrics-bin" + +// ToLoadReport unmarshals a binary encoded [ORCA LoadReport] protobuf message +// from md and returns the corresponding struct. The load report is expected to +// be stored as the value for key "endpoint-load-metrics-bin". +// +// If no load report was found in the provided metadata, if multiple load +// reports are found, or if the load report found cannot be parsed, an error is +// returned. +// +// [ORCA LoadReport]: (https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto#L15) +func ToLoadReport(md metadata.MD) (*v3orcapb.OrcaLoadReport, error) { + vs := md.Get(TrailerMetadataKey) + if len(vs) == 0 { + return nil, nil + } + if len(vs) != 1 { + return nil, errors.New("multiple orca load reports found in provided metadata") + } + ret := new(v3orcapb.OrcaLoadReport) + if err := proto.Unmarshal([]byte(vs[0]), ret); err != nil { + return nil, fmt.Errorf("failed to unmarshal load report found in metadata: %v", err) + } + return ret, nil +} diff --git a/orca/orca.go b/orca/orca.go new file mode 100644 index 000000000000..6b475562b1b9 --- /dev/null +++ b/orca/orca.go @@ -0,0 +1,57 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+// Package orca implements Open Request Cost Aggregation, which is an open
+// standard for request cost aggregation and reporting by backends and the
+// corresponding aggregation of such reports by L7 load balancers (such as
+// Envoy) on the data plane. In a proxyless world with gRPC enabled
+// applications, aggregation of such reports will be done by the gRPC client.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package orca
+
+import (
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/balancerload"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/orca/internal"
+)
+
+var logger = grpclog.Component("orca-backend-metrics")
+
+// loadParser implements the Parser interface defined in `internal/balancerload`
+// package. This interface is used by the client stream to parse load reports
+// sent by the server in trailer metadata. The parsed loads are then sent to
+// balancers via balancer.DoneInfo.
+//
+// The grpc package cannot directly call toLoadReport() as that would cause an
+// import cycle. Hence this roundabout method is used.
+type loadParser struct{}
+
+func (loadParser) Parse(md metadata.MD) interface{} {
+	lr, err := internal.ToLoadReport(md)
+	if err != nil {
+		logger.Infof("Parse failed: %v", err)
+	}
+	return lr
+}
+
+func init() {
+	balancerload.SetParser(loadParser{})
+}
diff --git a/orca/orca_test.go b/orca/orca_test.go
new file mode 100644
index 000000000000..4f85e7b01592
--- /dev/null
+++ b/orca/orca_test.go
@@ -0,0 +1,102 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca_test + +import ( + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/orca/internal" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const defaultTestTimeout = 5 * time.Second + +func (s) TestToLoadReport(t *testing.T) { + goodReport := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 1.0, + MemUtilization: 50.0, + RequestCost: map[string]float64{"queryCost": 25.0}, + Utilization: map[string]float64{"queueSize": 75.0}, + } + tests := []struct { + name string + md metadata.MD + want *v3orcapb.OrcaLoadReport + wantErr bool + }{ + { + name: "no load report in metadata", + md: metadata.MD{}, + wantErr: false, + }, + { + name: "badly marshaled load report", + md: func() metadata.MD { + return metadata.Pairs("endpoint-load-metrics-bin", string("foo-bar")) + }(), + wantErr: true, + }, + { + name: "multiple load reports", + md: func() metadata.MD { + b, _ := proto.Marshal(goodReport) + return metadata.Pairs("endpoint-load-metrics-bin", string(b), "endpoint-load-metrics-bin", string(b)) + }(), + wantErr: true, + }, + { + name: "good load report", + md: func() metadata.MD { + b, _ := proto.Marshal(goodReport) + return metadata.Pairs("endpoint-load-metrics-bin", string(b)) + }(), + want: 
goodReport, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := internal.ToLoadReport(test.md) + if (err != nil) != test.wantErr { + t.Fatalf("orca.ToLoadReport(%v) = %v, wantErr: %v", test.md, err, test.wantErr) + } + if test.wantErr { + return + } + if !cmp.Equal(got, test.want, cmp.Comparer(proto.Equal)) { + t.Fatalf("Extracted load report from metadata: %s, want: %s", pretty.ToJSON(got), pretty.ToJSON(test.want)) + } + }) + } +} diff --git a/orca/producer.go b/orca/producer.go new file mode 100644 index 000000000000..ce108aad65ca --- /dev/null +++ b/orca/producer.go @@ -0,0 +1,241 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package orca + +import ( + "context" + "sync" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/status" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" + "google.golang.org/protobuf/types/known/durationpb" +) + +type producerBuilder struct{} + +// Build constructs and returns a producer and its cleanup function +func (*producerBuilder) Build(cci interface{}) (balancer.Producer, func()) { + p := &producer{ + client: v3orcaservicegrpc.NewOpenRcaServiceClient(cci.(grpc.ClientConnInterface)), + intervals: make(map[time.Duration]int), + listeners: make(map[OOBListener]struct{}), + backoff: internal.DefaultBackoffFunc, + } + return p, func() { + <-p.stopped + } +} + +var producerBuilderSingleton = &producerBuilder{} + +// OOBListener is used to receive out-of-band load reports as they arrive. +type OOBListener interface { + // OnLoadReport is called when a load report is received. + OnLoadReport(*v3orcapb.OrcaLoadReport) +} + +// OOBListenerOptions contains options to control how an OOBListener is called. +type OOBListenerOptions struct { + // ReportInterval specifies how often to request the server to provide a + // load report. May be provided less frequently if the server requires a + // longer interval, or may be provided more frequently if another + // subscriber requests a shorter interval. + ReportInterval time.Duration +} + +// RegisterOOBListener registers an out-of-band load report listener on sc. +// Any OOBListener may only be registered once per subchannel at a time. The +// returned stop function must be called when no longer needed. Do not +// register a single OOBListener more than once per SubConn. 
+func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOptions) (stop func()) {
+	pr, close := sc.GetOrBuildProducer(producerBuilderSingleton)
+	p := pr.(*producer)
+
+	p.registerListener(l, opts.ReportInterval)
+
+	// TODO: When we can register for SubConn state updates, automatically call
+	// stop() on SHUTDOWN.
+
+	// If stop is called multiple times, prevent it from having any effect on
+	// subsequent calls.
+	return grpcsync.OnceFunc(func() {
+		p.unregisterListener(l, opts.ReportInterval)
+		close()
+	})
+}
+
+type producer struct {
+	client v3orcaservicegrpc.OpenRcaServiceClient
+
+	// backoff is called between stream attempts to determine how long to delay
+	// to avoid overloading a server experiencing problems. The attempt count
+	// is incremented when stream errors occur and is reset when the stream
+	// reports a result.
+	backoff func(int) time.Duration
+
+	mu          sync.Mutex
+	intervals   map[time.Duration]int    // map from interval time to count of listeners requesting that time
+	listeners   map[OOBListener]struct{} // set of registered listeners
+	minInterval time.Duration
+	stop        func()        // stops the current run goroutine
+	stopped     chan struct{} // closed when the run goroutine exits
+}
+
+// registerListener adds the listener and its requested report interval to the
+// producer.
+func (p *producer) registerListener(l OOBListener, interval time.Duration) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.listeners[l] = struct{}{}
+	p.intervals[interval]++
+	if len(p.listeners) == 1 || interval < p.minInterval {
+		p.minInterval = interval
+		p.updateRunLocked()
+	}
+}
+
+// unregisterListener removes the listener and its requested report interval
+// from the producer.
+func (p *producer) unregisterListener(l OOBListener, interval time.Duration) { + p.mu.Lock() + defer p.mu.Unlock() + + delete(p.listeners, l) + p.intervals[interval]-- + if p.intervals[interval] == 0 { + delete(p.intervals, interval) + + if p.minInterval == interval { + p.recomputeMinInterval() + p.updateRunLocked() + } + } +} + +// recomputeMinInterval sets p.minInterval to the minimum key's value in +// p.intervals. +func (p *producer) recomputeMinInterval() { + first := true + for interval := range p.intervals { + if first || interval < p.minInterval { + p.minInterval = interval + first = false + } + } +} + +// updateRunLocked is called whenever the run goroutine needs to be started / +// stopped / restarted due to: 1. the initial listener being registered, 2. the +// final listener being unregistered, or 3. the minimum registered interval +// changing. +func (p *producer) updateRunLocked() { + if p.stop != nil { + p.stop() + p.stop = nil + } + if len(p.listeners) > 0 { + var ctx context.Context + ctx, p.stop = context.WithCancel(context.Background()) + p.stopped = make(chan struct{}) + go p.run(ctx, p.stopped, p.minInterval) + } +} + +// run manages the ORCA OOB stream on the subchannel. +func (p *producer) run(ctx context.Context, done chan struct{}, interval time.Duration) { + defer close(done) + + backoffAttempt := 0 + backoffTimer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-backoffTimer.C: + case <-ctx.Done(): + return + } + + resetBackoff, err := p.runStream(ctx, interval) + + if resetBackoff { + backoffTimer.Reset(0) + backoffAttempt = 0 + } else { + backoffTimer.Reset(p.backoff(backoffAttempt)) + backoffAttempt++ + } + + switch { + case err == nil: + // No error was encountered; restart the stream. + case ctx.Err() != nil: + // Producer was stopped; exit immediately and without logging an + // error. + return + case status.Code(err) == codes.Unimplemented: + // Unimplemented; do not retry. 
+ logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.") + return + case status.Code(err) == codes.Unavailable, status.Code(err) == codes.Canceled: + // TODO: these codes should ideally log an error, too, but for now + // we receive them when shutting down the ClientConn (Unavailable + // if the stream hasn't started yet, and Canceled if it happens + // mid-stream). Once we can determine the state or ensure the + // producer is stopped before the stream ends, we can log an error + // when it's not a natural shutdown. + default: + // Log all other errors. + logger.Error("Received unexpected stream error:", err) + } + } +} + +// runStream runs a single stream on the subchannel and returns the resulting +// error, if any, and whether or not the run loop should reset the backoff +// timer to zero or advance it. +func (p *producer) runStream(ctx context.Context, interval time.Duration) (resetBackoff bool, err error) { + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := p.client.StreamCoreMetrics(streamCtx, &v3orcaservicepb.OrcaLoadReportRequest{ + ReportInterval: durationpb.New(interval), + }) + if err != nil { + return false, err + } + + for { + report, err := stream.Recv() + if err != nil { + return resetBackoff, err + } + resetBackoff = true + p.mu.Lock() + for l := range p.listeners { + l.OnLoadReport(report) + } + p.mu.Unlock() + } +} diff --git a/orca/producer_test.go b/orca/producer_test.go new file mode 100644 index 000000000000..212cf2500f6b --- /dev/null +++ b/orca/producer_test.go @@ -0,0 +1,547 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package orca_test + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" +) + +// customLBB wraps a round robin LB policy but provides a ClientConn wrapper to +// add an ORCA OOB report producer for all created SubConns. 
+type customLBB struct{} + +func (customLBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return balancer.Get(roundrobin.Name).Build(&ccWrapper{ClientConn: cc}, opts) +} + +func (customLBB) Name() string { return "customLB" } + +func init() { + balancer.Register(customLBB{}) +} + +type ccWrapper struct { + balancer.ClientConn +} + +func (w *ccWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) != 1 { + panic(fmt.Sprintf("got addrs=%v; want len(addrs) == 1", addrs)) + } + sc, err := w.ClientConn.NewSubConn(addrs, opts) + if err != nil { + return sc, err + } + l := getListenerInfo(addrs[0]) + l.listener.cleanup = orca.RegisterOOBListener(sc, l.listener, l.opts) + l.sc = sc + return sc, nil +} + +// listenerInfo is stored in an address's attributes to allow ORCA +// listeners to be registered on subconns created for that address. +type listenerInfo struct { + listener *testOOBListener + opts orca.OOBListenerOptions + sc balancer.SubConn // Set by the LB policy +} + +type listenerInfoKey struct{} + +func setListenerInfo(addr resolver.Address, l *listenerInfo) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(listenerInfoKey{}, l) + return addr +} + +func getListenerInfo(addr resolver.Address) *listenerInfo { + return addr.Attributes.Value(listenerInfoKey{}).(*listenerInfo) +} + +// testOOBListener is a simple listener that pushes load reports to a channel. 
+type testOOBListener struct { + cleanup func() + loadReportCh chan *v3orcapb.OrcaLoadReport +} + +func newTestOOBListener() *testOOBListener { + return &testOOBListener{cleanup: func() {}, loadReportCh: make(chan *v3orcapb.OrcaLoadReport)} +} + +func (t *testOOBListener) Stop() { t.cleanup() } + +func (t *testOOBListener) OnLoadReport(r *v3orcapb.OrcaLoadReport) { + t.loadReportCh <- r +} + +// TestProducer is a basic, end-to-end style test of an LB policy with an +// OOBListener communicating with a server with an ORCA service. +func (s) TestProducer(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Use a fixed backoff for stream recreation. + oldBackoff := internal.DefaultBackoffFunc + internal.DefaultBackoffFunc = func(int) time.Duration { return 10 * time.Millisecond } + defer func() { internal.DefaultBackoffFunc = oldBackoff }() + + // Initialize listener for our ORCA server. + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + + // Register the OpenRCAService with a very short metrics reporting interval. + const shortReportingInterval = 50 * time.Millisecond + smr := orca.NewServerMetricsRecorder() + opts := orca.ServiceOptions{MinReportingInterval: shortReportingInterval, ServerMetricsProvider: smr} + internal.AllowAnyMinReportingInterval.(func(*orca.ServiceOptions))(&opts) + s := grpc.NewServer() + if err := orca.Register(s, opts); err != nil { + t.Fatalf("orca.Register failed: %v", err) + } + go s.Serve(lis) + defer s.Stop() + + // Create our client with an OOB listener in the LB policy it selects. 
+ r := manual.NewBuilderWithScheme("whatever") + oobLis := newTestOOBListener() + + lisOpts := orca.OOBListenerOptions{ReportInterval: 50 * time.Millisecond} + li := &listenerInfo{listener: oobLis, opts: lisOpts} + addr := setListenerInfo(resolver.Address{Addr: lis.Addr().String()}, li) + r.InitialState(resolver.State{Addresses: []resolver.Address{addr}}) + cc, err := grpc.Dial("whatever:///whatever", grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"customLB":{}}]}`), grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial failed: %v", err) + } + defer cc.Close() + + // Ensure the OOB listener is stopped before the client is closed to avoid + // a potential irrelevant error in the logs. + defer oobLis.Stop() + + // Set a few metrics and wait for them on the client side. + smr.SetCPUUtilization(10) + smr.SetMemoryUtilization(0.1) + smr.SetNamedUtilization("bob", 0.555) + loadReportWant := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 10, + MemUtilization: 0.1, + Utilization: map[string]float64{"bob": 0.555}, + } + +testReport: + for { + select { + case r := <-oobLis.loadReportCh: + t.Log("Load report received: ", r) + if proto.Equal(r, loadReportWant) { + // Success! + break testReport + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for load report: %v", loadReportWant) + } + } + + // Change and add metrics and wait for them on the client side. + smr.SetCPUUtilization(0.5) + smr.SetMemoryUtilization(0.2) + smr.SetNamedUtilization("mary", 0.321) + loadReportWant = &v3orcapb.OrcaLoadReport{ + CpuUtilization: 0.5, + MemUtilization: 0.2, + Utilization: map[string]float64{"bob": 0.555, "mary": 0.321}, + } + + for { + select { + case r := <-oobLis.loadReportCh: + t.Log("Load report received: ", r) + if proto.Equal(r, loadReportWant) { + // Success! 
+ return + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for load report: %v", loadReportWant) + } + } +} + +// fakeORCAService is a simple implementation of an ORCA service that pushes +// requests it receives from clients to a channel and sends responses from a +// channel back. This allows tests to verify the client is sending requests +// and processing responses properly. +type fakeORCAService struct { + v3orcaservicegrpc.UnimplementedOpenRcaServiceServer + + reqCh chan *v3orcaservicepb.OrcaLoadReportRequest + respCh chan interface{} // either *v3orcapb.OrcaLoadReport or error +} + +func newFakeORCAService() *fakeORCAService { + return &fakeORCAService{ + reqCh: make(chan *v3orcaservicepb.OrcaLoadReportRequest), + respCh: make(chan interface{}), + } +} + +func (f *fakeORCAService) close() { + close(f.respCh) +} + +func (f *fakeORCAService) StreamCoreMetrics(req *v3orcaservicepb.OrcaLoadReportRequest, stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + f.reqCh <- req + for resp := range f.respCh { + if err, ok := resp.(error); ok { + return err + } + if err := stream.Send(resp.(*v3orcapb.OrcaLoadReport)); err != nil { + // In the event that a stream error occurs, a new stream will have + // been created that was waiting for this response message. Push + // it back onto the channel and return. + // + // This happens because we range over respCh. If we changed to + // instead select on respCh + stream.Context(), the same situation + // could still occur due to a race between noticing the two events, + // so such a workaround would still be needed to prevent flakiness. + f.respCh <- resp + return err + } + } + return nil +} + +// TestProducerBackoff verifies that the ORCA producer applies the proper +// backoff after stream failures. 
+func (s) TestProducerBackoff(t *testing.T) { + grpctest.TLogger.ExpectErrorN("injected error", 4) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Provide a convenient way to expect backoff calls and return a minimal + // value. + const backoffShouldNotBeCalled = 9999 // Use to assert backoff function is not called. + const backoffAllowAny = -1 // Use to ignore any backoff calls. + expectedBackoff := backoffAllowAny + oldBackoff := internal.DefaultBackoffFunc + internal.DefaultBackoffFunc = func(got int) time.Duration { + if expectedBackoff == backoffShouldNotBeCalled { + t.Errorf("Unexpected backoff call; parameter = %v", got) + } else if expectedBackoff != backoffAllowAny { + if got != expectedBackoff { + t.Errorf("Unexpected backoff received; got %v want %v", got, expectedBackoff) + } + } + return time.Millisecond + } + defer func() { internal.DefaultBackoffFunc = oldBackoff }() + + // Initialize listener for our ORCA server. + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + + // Register our fake ORCA service. + s := grpc.NewServer() + fake := newFakeORCAService() + defer fake.close() + v3orcaservicegrpc.RegisterOpenRcaServiceServer(s, fake) + go s.Serve(lis) + defer s.Stop() + + // Define the report interval and a function to wait for it to be sent to + // the server. + const reportInterval = 123 * time.Second + awaitRequest := func(interval time.Duration) { + select { + case req := <-fake.reqCh: + if got := req.GetReportInterval().AsDuration(); got != interval { + t.Errorf("Unexpected report interval; got %v want %v", got, interval) + } + case <-ctx.Done(): + t.Fatalf("Did not receive client request") + } + } + + // Create our client with an OOB listener in the LB policy it selects. 
+ r := manual.NewBuilderWithScheme("whatever") + oobLis := newTestOOBListener() + + lisOpts := orca.OOBListenerOptions{ReportInterval: reportInterval} + li := &listenerInfo{listener: oobLis, opts: lisOpts} + r.InitialState(resolver.State{Addresses: []resolver.Address{setListenerInfo(resolver.Address{Addr: lis.Addr().String()}, li)}}) + cc, err := grpc.Dial("whatever:///whatever", grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"customLB":{}}]}`), grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial failed: %v", err) + } + defer cc.Close() + + // Ensure the OOB listener is stopped before the client is closed to avoid + // a potential irrelevant error in the logs. + defer oobLis.Stop() + + // Define a load report to send and expect the client to see. + loadReportWant := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 10, + MemUtilization: 0.1, + Utilization: map[string]float64{"bob": 0.555}, + } + + // Unblock the fake. + awaitRequest(reportInterval) + fake.respCh <- loadReportWant + select { + case r := <-oobLis.loadReportCh: + t.Log("Load report received: ", r) + if proto.Equal(r, loadReportWant) { + // Success! + break + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for load report: %v", loadReportWant) + } + + // The next request should be immediate, since there was a message + // received. + expectedBackoff = backoffShouldNotBeCalled + fake.respCh <- status.Errorf(codes.Internal, "injected error") + awaitRequest(reportInterval) + + // The next requests will need to backoff. 
+ expectedBackoff = 0 + fake.respCh <- status.Errorf(codes.Internal, "injected error") + awaitRequest(reportInterval) + expectedBackoff = 1 + fake.respCh <- status.Errorf(codes.Internal, "injected error") + awaitRequest(reportInterval) + expectedBackoff = 2 + fake.respCh <- status.Errorf(codes.Internal, "injected error") + awaitRequest(reportInterval) + // The next request should be immediate, since there was a message + // received. + expectedBackoff = backoffShouldNotBeCalled + + // Send another valid response and wait for it on the client. + fake.respCh <- loadReportWant + select { + case r := <-oobLis.loadReportCh: + t.Log("Load report received: ", r) + if proto.Equal(r, loadReportWant) { + // Success! + break + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for load report: %v", loadReportWant) + } +} + +// TestProducerMultipleListeners tests that multiple listeners works as +// expected in a producer: requesting the proper interval and delivering the +// update to all listeners. +func (s) TestProducerMultipleListeners(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Provide a convenient way to expect backoff calls and return a minimal + // value. + oldBackoff := internal.DefaultBackoffFunc + internal.DefaultBackoffFunc = func(got int) time.Duration { + return time.Millisecond + } + defer func() { internal.DefaultBackoffFunc = oldBackoff }() + + // Initialize listener for our ORCA server. + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + + // Register our fake ORCA service. + s := grpc.NewServer() + fake := newFakeORCAService() + defer fake.close() + v3orcaservicegrpc.RegisterOpenRcaServiceServer(s, fake) + go s.Serve(lis) + defer s.Stop() + + // Define the report interval and a function to wait for it to be sent to + // the server. 
+ const reportInterval1 = 123 * time.Second + const reportInterval2 = 234 * time.Second + const reportInterval3 = 56 * time.Second + awaitRequest := func(interval time.Duration) { + select { + case req := <-fake.reqCh: + if got := req.GetReportInterval().AsDuration(); got != interval { + t.Errorf("Unexpected report interval; got %v want %v", got, interval) + } + case <-ctx.Done(): + t.Fatalf("Did not receive client request") + } + } + + // Create our client with an OOB listener in the LB policy it selects. + r := manual.NewBuilderWithScheme("whatever") + oobLis1 := newTestOOBListener() + lisOpts1 := orca.OOBListenerOptions{ReportInterval: reportInterval1} + li := &listenerInfo{listener: oobLis1, opts: lisOpts1} + r.InitialState(resolver.State{Addresses: []resolver.Address{setListenerInfo(resolver.Address{Addr: lis.Addr().String()}, li)}}) + cc, err := grpc.Dial("whatever:///whatever", grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"customLB":{}}]}`), grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial failed: %v", err) + } + defer cc.Close() + + // Ensure the OOB listener is stopped before the client is closed to avoid + // a potential irrelevant error in the logs. + defer oobLis1.Stop() + + oobLis2 := newTestOOBListener() + lisOpts2 := orca.OOBListenerOptions{ReportInterval: reportInterval2} + + oobLis3 := newTestOOBListener() + lisOpts3 := orca.OOBListenerOptions{ReportInterval: reportInterval3} + + // Define a load report to send and expect the client to see. + loadReportWant := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 10, + MemUtilization: 0.1, + Utilization: map[string]float64{"bob": 0.555}, + } + + // Receive reports and update counts for the three listeners. 
+ var reportsMu sync.Mutex + var reportsReceived1, reportsReceived2, reportsReceived3 int + go func() { + for { + select { + case r := <-oobLis1.loadReportCh: + t.Log("Load report 1 received: ", r) + if !proto.Equal(r, loadReportWant) { + t.Errorf("Unexpected report received: %+v", r) + } + reportsMu.Lock() + reportsReceived1++ + reportsMu.Unlock() + case r := <-oobLis2.loadReportCh: + t.Log("Load report 2 received: ", r) + if !proto.Equal(r, loadReportWant) { + t.Errorf("Unexpected report received: %+v", r) + } + reportsMu.Lock() + reportsReceived2++ + reportsMu.Unlock() + case r := <-oobLis3.loadReportCh: + t.Log("Load report 3 received: ", r) + if !proto.Equal(r, loadReportWant) { + t.Errorf("Unexpected report received: %+v", r) + } + reportsMu.Lock() + reportsReceived3++ + reportsMu.Unlock() + case <-ctx.Done(): + // Test has ended; exit + return + } + } + }() + + // checkReports is a helper function to check the report counts for the three listeners. + checkReports := func(r1, r2, r3 int) { + t.Helper() + for ctx.Err() == nil { + reportsMu.Lock() + if r1 == reportsReceived1 && r2 == reportsReceived2 && r3 == reportsReceived3 { + // Success! + reportsMu.Unlock() + return + } + if reportsReceived1 > r1 || reportsReceived2 > r2 || reportsReceived3 > r3 { + reportsMu.Unlock() + t.Fatalf("received excess reports. got %v %v %v; want %v %v %v", reportsReceived1, reportsReceived2, reportsReceived3, r1, r2, r3) + return + } + reportsMu.Unlock() + time.Sleep(10 * time.Millisecond) + } + t.Fatalf("timed out waiting for reports received. got %v %v %v; want %v %v %v", reportsReceived1, reportsReceived2, reportsReceived3, r1, r2, r3) + } + + // Only 1 listener; expect reportInterval1 to be used and expect the report + // to be sent to the listener. + awaitRequest(reportInterval1) + fake.respCh <- loadReportWant + checkReports(1, 0, 0) + + // Register listener 2 with a less frequent interval; no need to recreate + // stream. Report should go to both listeners. 
+ oobLis2.cleanup = orca.RegisterOOBListener(li.sc, oobLis2, lisOpts2) + fake.respCh <- loadReportWant + checkReports(2, 1, 0) + + // Register listener 3 with a more frequent interval; stream is recreated + // with this interval. The next report will go to all three listeners. + oobLis3.cleanup = orca.RegisterOOBListener(li.sc, oobLis3, lisOpts3) + awaitRequest(reportInterval3) + fake.respCh <- loadReportWant + checkReports(3, 2, 1) + + // Another report without a change in listeners should go to all three listeners. + fake.respCh <- loadReportWant + checkReports(4, 3, 2) + + // Stop listener 2. This does not affect the interval as listener 3 is + // still the shortest. The next update goes to listeners 1 and 3. + oobLis2.Stop() + fake.respCh <- loadReportWant + checkReports(5, 3, 3) + + // Stop listener 3. This makes the interval longer. Reports should only + // go to listener 1 now. + oobLis3.Stop() + awaitRequest(reportInterval1) + fake.respCh <- loadReportWant + checkReports(6, 3, 3) + // Another report without a change in listeners should go to the first listener. + fake.respCh <- loadReportWant + checkReports(7, 3, 3) +} diff --git a/orca/server_metrics.go b/orca/server_metrics.go new file mode 100644 index 000000000000..f2cdb9b0b26f --- /dev/null +++ b/orca/server_metrics.go @@ -0,0 +1,351 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package orca + +import ( + "sync" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" +) + +// ServerMetrics is the data returned from a server to a client to describe the +// current state of the server and/or the cost of a request when used per-call. +type ServerMetrics struct { + CPUUtilization float64 // CPU utilization: [0, inf); unset=-1 + MemUtilization float64 // Memory utilization: [0, 1.0]; unset=-1 + AppUtilization float64 // Application utilization: [0, inf); unset=-1 + QPS float64 // queries per second: [0, inf); unset=-1 + EPS float64 // errors per second: [0, inf); unset=-1 + + // The following maps must never be nil. + + Utilization map[string]float64 // Custom fields: [0, 1.0] + RequestCost map[string]float64 // Custom fields: [0, inf); not sent OOB + NamedMetrics map[string]float64 // Custom fields: [0, inf); not sent OOB +} + +// toLoadReportProto dumps sm as an OrcaLoadReport proto. +func (sm *ServerMetrics) toLoadReportProto() *v3orcapb.OrcaLoadReport { + ret := &v3orcapb.OrcaLoadReport{ + Utilization: sm.Utilization, + RequestCost: sm.RequestCost, + NamedMetrics: sm.NamedMetrics, + } + if sm.CPUUtilization != -1 { + ret.CpuUtilization = sm.CPUUtilization + } + if sm.MemUtilization != -1 { + ret.MemUtilization = sm.MemUtilization + } + if sm.AppUtilization != -1 { + ret.ApplicationUtilization = sm.AppUtilization + } + if sm.QPS != -1 { + ret.RpsFractional = sm.QPS + } + if sm.EPS != -1 { + ret.Eps = sm.EPS + } + return ret +} + +// merge merges o into sm, overwriting any values present in both. 
+func (sm *ServerMetrics) merge(o *ServerMetrics) { + mergeMap(sm.Utilization, o.Utilization) + mergeMap(sm.RequestCost, o.RequestCost) + mergeMap(sm.NamedMetrics, o.NamedMetrics) + if o.CPUUtilization != -1 { + sm.CPUUtilization = o.CPUUtilization + } + if o.MemUtilization != -1 { + sm.MemUtilization = o.MemUtilization + } + if o.AppUtilization != -1 { + sm.AppUtilization = o.AppUtilization + } + if o.QPS != -1 { + sm.QPS = o.QPS + } + if o.EPS != -1 { + sm.EPS = o.EPS + } +} + +func mergeMap(a, b map[string]float64) { + for k, v := range b { + a[k] = v + } +} + +// ServerMetricsRecorder allows for recording and providing out of band server +// metrics. +type ServerMetricsRecorder interface { + ServerMetricsProvider + + // SetCPUUtilization sets the CPU utilization server metric. Must be + // greater than zero. + SetCPUUtilization(float64) + // DeleteCPUUtilization deletes the CPU utilization server metric to + // prevent it from being sent. + DeleteCPUUtilization() + + // SetMemoryUtilization sets the memory utilization server metric. Must be + // in the range [0, 1]. + SetMemoryUtilization(float64) + // DeleteMemoryUtilization deletes the memory utilization server metric to + // prevent it from being sent. + DeleteMemoryUtilization() + + // SetApplicationUtilization sets the application utilization server + // metric. Must be greater than zero. + SetApplicationUtilization(float64) + // DeleteApplicationUtilization deletes the application utilization server + // metric to prevent it from being sent. + DeleteApplicationUtilization() + + // SetQPS sets the Queries Per Second server metric. Must be greater than + // zero. + SetQPS(float64) + // DeleteQPS deletes the Queries Per Second server metric to prevent it + // from being sent. + DeleteQPS() + + // SetEPS sets the Errors Per Second server metric. Must be greater than + // zero. + SetEPS(float64) + // DeleteEPS deletes the Errors Per Second server metric to prevent it from + // being sent. 
+ DeleteEPS() + + // SetNamedUtilization sets the named utilization server metric for the + // name provided. val must be in the range [0, 1]. + SetNamedUtilization(name string, val float64) + // DeleteNamedUtilization deletes the named utilization server metric for + // the name provided to prevent it from being sent. + DeleteNamedUtilization(name string) +} + +type serverMetricsRecorder struct { + mu sync.Mutex // protects state + state *ServerMetrics // the current metrics +} + +// NewServerMetricsRecorder returns an in-memory store for ServerMetrics and +// allows for safe setting and retrieving of ServerMetrics. Also implements +// ServerMetricsProvider for use with NewService. +func NewServerMetricsRecorder() ServerMetricsRecorder { + return newServerMetricsRecorder() +} + +func newServerMetricsRecorder() *serverMetricsRecorder { + return &serverMetricsRecorder{ + state: &ServerMetrics{ + CPUUtilization: -1, + MemUtilization: -1, + AppUtilization: -1, + QPS: -1, + EPS: -1, + Utilization: make(map[string]float64), + RequestCost: make(map[string]float64), + NamedMetrics: make(map[string]float64), + }, + } +} + +// ServerMetrics returns a copy of the current ServerMetrics. +func (s *serverMetricsRecorder) ServerMetrics() *ServerMetrics { + s.mu.Lock() + defer s.mu.Unlock() + return &ServerMetrics{ + CPUUtilization: s.state.CPUUtilization, + MemUtilization: s.state.MemUtilization, + AppUtilization: s.state.AppUtilization, + QPS: s.state.QPS, + EPS: s.state.EPS, + Utilization: copyMap(s.state.Utilization), + RequestCost: copyMap(s.state.RequestCost), + NamedMetrics: copyMap(s.state.NamedMetrics), + } +} + +func copyMap(m map[string]float64) map[string]float64 { + ret := make(map[string]float64, len(m)) + for k, v := range m { + ret[k] = v + } + return ret +} + +// SetCPUUtilization records a measurement for the CPU utilization metric. 
+func (s *serverMetricsRecorder) SetCPUUtilization(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring CPU Utilization value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.CPUUtilization = val +} + +// DeleteCPUUtilization deletes the relevant server metric to prevent it from +// being sent. +func (s *serverMetricsRecorder) DeleteCPUUtilization() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.CPUUtilization = -1 +} + +// SetMemoryUtilization records a measurement for the memory utilization metric. +func (s *serverMetricsRecorder) SetMemoryUtilization(val float64) { + if val < 0 || val > 1 { + if logger.V(2) { + logger.Infof("Ignoring Memory Utilization value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.MemUtilization = val +} + +// DeleteMemoryUtilization deletes the relevant server metric to prevent it +// from being sent. +func (s *serverMetricsRecorder) DeleteMemoryUtilization() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.MemUtilization = -1 +} + +// SetApplicationUtilization records a measurement for a generic utilization +// metric. +func (s *serverMetricsRecorder) SetApplicationUtilization(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring Application Utilization value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.AppUtilization = val +} + +// DeleteApplicationUtilization deletes the relevant server metric to prevent +// it from being sent. +func (s *serverMetricsRecorder) DeleteApplicationUtilization() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.AppUtilization = -1 +} + +// SetQPS records a measurement for the QPS metric. 
+func (s *serverMetricsRecorder) SetQPS(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring QPS value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.QPS = val +} + +// DeleteQPS deletes the relevant server metric to prevent it from being sent. +func (s *serverMetricsRecorder) DeleteQPS() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.QPS = -1 +} + +// SetEPS records a measurement for the EPS metric. +func (s *serverMetricsRecorder) SetEPS(val float64) { + if val < 0 { + if logger.V(2) { + logger.Infof("Ignoring EPS value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.EPS = val +} + +// DeleteEPS deletes the relevant server metric to prevent it from being sent. +func (s *serverMetricsRecorder) DeleteEPS() { + s.mu.Lock() + defer s.mu.Unlock() + s.state.EPS = -1 +} + +// SetNamedUtilization records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetNamedUtilization(name string, val float64) { + if val < 0 || val > 1 { + if logger.V(2) { + logger.Infof("Ignoring Named Utilization value out of range: %v", val) + } + return + } + s.mu.Lock() + defer s.mu.Unlock() + s.state.Utilization[name] = val +} + +// DeleteNamedUtilization deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *serverMetricsRecorder) DeleteNamedUtilization(name string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.state.Utilization, name) +} + +// SetRequestCost records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetRequestCost(name string, val float64) { + s.mu.Lock() + defer s.mu.Unlock() + s.state.RequestCost[name] = val +} + +// DeleteRequestCost deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. 
+func (s *serverMetricsRecorder) DeleteRequestCost(name string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.state.RequestCost, name) +} + +// SetNamedMetric records a measurement for a utilization metric uniquely +// identifiable by name. +func (s *serverMetricsRecorder) SetNamedMetric(name string, val float64) { + s.mu.Lock() + defer s.mu.Unlock() + s.state.NamedMetrics[name] = val +} + +// DeleteNamedMetric deletes any previously recorded measurement for a +// utilization metric uniquely identifiable by name. +func (s *serverMetricsRecorder) DeleteNamedMetric(name string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.state.NamedMetrics, name) +} diff --git a/orca/server_metrics_test.go b/orca/server_metrics_test.go new file mode 100644 index 000000000000..ecc80d0e584b --- /dev/null +++ b/orca/server_metrics_test.go @@ -0,0 +1,175 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package orca + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/grpctest" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestServerMetrics_Setters(t *testing.T) { + smr := NewServerMetricsRecorder() + + smr.SetCPUUtilization(0.1) + smr.SetMemoryUtilization(0.2) + smr.SetApplicationUtilization(0.3) + smr.SetQPS(0.4) + smr.SetEPS(0.5) + smr.SetNamedUtilization("x", 0.6) + + want := &ServerMetrics{ + CPUUtilization: 0.1, + MemUtilization: 0.2, + AppUtilization: 0.3, + QPS: 0.4, + EPS: 0.5, + Utilization: map[string]float64{"x": 0.6}, + NamedMetrics: map[string]float64{}, + RequestCost: map[string]float64{}, + } + + got := smr.ServerMetrics() + if d := cmp.Diff(got, want); d != "" { + t.Fatalf("unexpected server metrics: -got +want: %v", d) + } +} + +func (s) TestServerMetrics_Deleters(t *testing.T) { + smr := NewServerMetricsRecorder() + + smr.SetCPUUtilization(0.1) + smr.SetMemoryUtilization(0.2) + smr.SetApplicationUtilization(0.3) + smr.SetQPS(0.4) + smr.SetEPS(0.5) + smr.SetNamedUtilization("x", 0.6) + smr.SetNamedUtilization("y", 0.7) + + // Now delete everything except named_utilization "y". 
+ smr.DeleteCPUUtilization() + smr.DeleteMemoryUtilization() + smr.DeleteApplicationUtilization() + smr.DeleteQPS() + smr.DeleteEPS() + smr.DeleteNamedUtilization("x") + + want := &ServerMetrics{ + CPUUtilization: -1, + MemUtilization: -1, + AppUtilization: -1, + QPS: -1, + EPS: -1, + Utilization: map[string]float64{"y": 0.7}, + NamedMetrics: map[string]float64{}, + RequestCost: map[string]float64{}, + } + + got := smr.ServerMetrics() + if d := cmp.Diff(got, want); d != "" { + t.Fatalf("unexpected server metrics: -got +want: %v", d) + } +} + +func (s) TestServerMetrics_Setters_Range(t *testing.T) { + smr := NewServerMetricsRecorder() + + smr.SetCPUUtilization(0.1) + smr.SetMemoryUtilization(0.2) + smr.SetApplicationUtilization(0.3) + smr.SetQPS(0.4) + smr.SetEPS(0.5) + smr.SetNamedUtilization("x", 0.6) + + // Negatives for all these fields should be ignored. + smr.SetCPUUtilization(-2) + smr.SetMemoryUtilization(-3) + smr.SetApplicationUtilization(-4) + smr.SetQPS(-0.1) + smr.SetEPS(-0.6) + smr.SetNamedUtilization("x", -2) + + // Memory and named utilizations over 1 are ignored. 
+ smr.SetMemoryUtilization(1.1) + smr.SetNamedUtilization("x", 1.1) + + want := &ServerMetrics{ + CPUUtilization: 0.1, + MemUtilization: 0.2, + AppUtilization: 0.3, + QPS: 0.4, + EPS: 0.5, + Utilization: map[string]float64{"x": 0.6}, + NamedMetrics: map[string]float64{}, + RequestCost: map[string]float64{}, + } + + got := smr.ServerMetrics() + if d := cmp.Diff(got, want); d != "" { + t.Fatalf("unexpected server metrics: -got +want: %v", d) + } +} + +func (s) TestServerMetrics_Merge(t *testing.T) { + sm1 := &ServerMetrics{ + CPUUtilization: 0.1, + MemUtilization: 0.2, + AppUtilization: 0.3, + QPS: -1, + EPS: 0, + Utilization: map[string]float64{"x": 0.6}, + NamedMetrics: map[string]float64{"y": 0.2}, + RequestCost: map[string]float64{"a": 0.1}, + } + + sm2 := &ServerMetrics{ + CPUUtilization: -1, + AppUtilization: 0, + QPS: 0.9, + EPS: 20, + Utilization: map[string]float64{"x": 0.5, "y": 0.4}, + NamedMetrics: map[string]float64{"x": 0.1}, + RequestCost: map[string]float64{"a": 0.2}, + } + + want := &ServerMetrics{ + CPUUtilization: 0.1, + MemUtilization: 0, + AppUtilization: 0, + QPS: 0.9, + EPS: 20, + Utilization: map[string]float64{"x": 0.5, "y": 0.4}, + NamedMetrics: map[string]float64{"x": 0.1, "y": 0.2}, + RequestCost: map[string]float64{"a": 0.2}, + } + + sm1.merge(sm2) + if d := cmp.Diff(sm1, want); d != "" { + t.Fatalf("unexpected server metrics: -got +want: %v", d) + } +} diff --git a/orca/service.go b/orca/service.go new file mode 100644 index 000000000000..7461a6b05a1a --- /dev/null +++ b/orca/service.go @@ -0,0 +1,163 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package orca + +import ( + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + ointernal "google.golang.org/grpc/orca/internal" + "google.golang.org/grpc/status" + + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" +) + +func init() { + ointernal.AllowAnyMinReportingInterval = func(so *ServiceOptions) { + so.allowAnyMinReportingInterval = true + } + internal.ORCAAllowAnyMinReportingInterval = ointernal.AllowAnyMinReportingInterval +} + +// minReportingInterval is the absolute minimum value supported for +// out-of-band metrics reporting from the ORCA service implementation +// provided by the orca package. +const minReportingInterval = 30 * time.Second + +// Service provides an implementation of the OpenRcaService as defined in the +// [ORCA] service protos. Instances of this type must be created via calls to +// Register() or NewService(). +// +// Server applications can use the SetXxx() and DeleteXxx() methods to record +// measurements corresponding to backend metrics, which eventually get pushed to +// clients who have initiated the StreamCoreMetrics streaming RPC. +// +// [ORCA]: https://github.com/cncf/xds/blob/main/xds/service/orca/v3/orca.proto +type Service struct { + v3orcaservicegrpc.UnimplementedOpenRcaServiceServer + + // Minimum reporting interval, as configured by the user, or the default. 
+ minReportingInterval time.Duration + + smProvider ServerMetricsProvider +} + +// ServiceOptions contains options to configure the ORCA service implementation. +type ServiceOptions struct { + // ServerMetricsProvider is the provider to be used by the service for + // reporting OOB server metrics to clients. Typically obtained via + // NewServerMetricsRecorder. This field is required. + ServerMetricsProvider ServerMetricsProvider + + // MinReportingInterval sets the lower bound for how often out-of-band + // metrics are reported on the streaming RPC initiated by the client. If + // unspecified, negative or less than the default value of 30s, the default + // is used. Clients may request a higher value as part of the + // StreamCoreMetrics streaming RPC. + MinReportingInterval time.Duration + + // Allow a minReportingInterval which is less than the default of 30s. + // Used for testing purposes only. + allowAnyMinReportingInterval bool +} + +// A ServerMetricsProvider provides ServerMetrics upon request. +type ServerMetricsProvider interface { + // ServerMetrics returns the current set of server metrics. It should + // return a read-only, immutable copy of the data that is active at the + // time of the call. + ServerMetrics() *ServerMetrics +} + +// NewService creates a new ORCA service implementation configured using the +// provided options. +func NewService(opts ServiceOptions) (*Service, error) { + // The default minimum supported reporting interval value can be overridden + // for testing purposes through the orca internal package. 
+ if opts.ServerMetricsProvider == nil { + return nil, fmt.Errorf("ServerMetricsProvider not specified") + } + if !opts.allowAnyMinReportingInterval { + if opts.MinReportingInterval < 0 || opts.MinReportingInterval < minReportingInterval { + opts.MinReportingInterval = minReportingInterval + } + } + service := &Service{ + minReportingInterval: opts.MinReportingInterval, + smProvider: opts.ServerMetricsProvider, + } + return service, nil +} + +// Register creates a new ORCA service implementation configured using the +// provided options and registers the same on the provided grpc Server. +func Register(s *grpc.Server, opts ServiceOptions) error { + // TODO(https://github.com/cncf/xds/issues/41): replace *grpc.Server with + // grpc.ServiceRegistrar when possible. + service, err := NewService(opts) + if err != nil { + return err + } + v3orcaservicegrpc.RegisterOpenRcaServiceServer(s, service) + return nil +} + +// determineReportingInterval determines the reporting interval for out-of-band +// metrics. If the reporting interval is not specified in the request, or is +// negative or is less than the configured minimum (via +// ServiceOptions.MinReportingInterval), the latter is used. Else the value from +// the incoming request is used. +func (s *Service) determineReportingInterval(req *v3orcaservicepb.OrcaLoadReportRequest) time.Duration { + if req.GetReportInterval() == nil { + return s.minReportingInterval + } + dur := req.GetReportInterval().AsDuration() + if dur < s.minReportingInterval { + logger.Warningf("Received reporting interval %q is less than configured minimum: %v. Using minimum", dur, s.minReportingInterval) + return s.minReportingInterval + } + return dur +} + +func (s *Service) sendMetricsResponse(stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + return stream.Send(s.smProvider.ServerMetrics().toLoadReportProto()) +} + +// StreamCoreMetrics streams custom backend metrics injected by the server +// application. 
+func (s *Service) StreamCoreMetrics(req *v3orcaservicepb.OrcaLoadReportRequest, stream v3orcaservicegrpc.OpenRcaService_StreamCoreMetricsServer) error { + ticker := time.NewTicker(s.determineReportingInterval(req)) + defer ticker.Stop() + + for { + if err := s.sendMetricsResponse(stream); err != nil { + return err + } + // Send a response containing the currently recorded metrics + select { + case <-stream.Context().Done(): + return status.Error(codes.Canceled, "Stream has ended.") + case <-ticker.C: + } + } +} diff --git a/orca/service_test.go b/orca/service_test.go new file mode 100644 index 000000000000..9c4defbe266b --- /dev/null +++ b/orca/service_test.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package orca_test + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/orca" + "google.golang.org/grpc/orca/internal" + "google.golang.org/protobuf/types/known/durationpb" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" + v3orcaservicegrpc "github.com/cncf/xds/go/xds/service/orca/v3" + v3orcaservicepb "github.com/cncf/xds/go/xds/service/orca/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +const requestsMetricKey = "test-service-requests" + +// An implementation of grpc_testing.TestService for the purpose of this test. +// We cannot use the StubServer approach here because we need to register the +// OpenRCAService as well on the same gRPC server. 
+type testServiceImpl struct { + mu sync.Mutex + requests int64 + + testgrpc.TestServiceServer + smr orca.ServerMetricsRecorder +} + +func (t *testServiceImpl) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + t.mu.Lock() + t.requests++ + t.mu.Unlock() + + t.smr.SetNamedUtilization(requestsMetricKey, float64(t.requests)*0.01) + t.smr.SetCPUUtilization(50.0) + t.smr.SetMemoryUtilization(0.9) + t.smr.SetApplicationUtilization(1.2) + return &testpb.SimpleResponse{}, nil +} + +func (t *testServiceImpl) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { + t.smr.DeleteNamedUtilization(requestsMetricKey) + t.smr.SetCPUUtilization(0) + t.smr.SetMemoryUtilization(0) + t.smr.DeleteApplicationUtilization() + return &testpb.Empty{}, nil +} + +// TestE2E_CustomBackendMetrics_OutOfBand tests the injection of out-of-band +// custom backend metrics from the server application, and verifies that +// expected load reports are received at the client. +// +// TODO: Change this test to use the client API, when ready, to read the +// out-of-band metrics pushed by the server. +func (s) TestE2E_CustomBackendMetrics_OutOfBand(t *testing.T) { + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + + // Override the min reporting interval in the internal package. + const shortReportingInterval = 10 * time.Millisecond + smr := orca.NewServerMetricsRecorder() + opts := orca.ServiceOptions{MinReportingInterval: shortReportingInterval, ServerMetricsProvider: smr} + internal.AllowAnyMinReportingInterval.(func(*orca.ServiceOptions))(&opts) + + // Register the OpenRCAService with a very short metrics reporting interval. + s := grpc.NewServer() + if err := orca.Register(s, opts); err != nil { + t.Fatalf("orca.EnableOutOfBandMetricsReportingForTesting() failed: %v", err) + } + + // Register the test service implementation on the same grpc server, and start serving. 
+ testgrpc.RegisterTestServiceServer(s, &testServiceImpl{smr: smr}) + go s.Serve(lis) + defer s.Stop() + t.Logf("Started gRPC server at %s...", lis.Addr().String()) + + // Dial the test server. + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial(%s) failed: %v", lis.Addr().String(), err) + } + defer cc.Close() + + // Spawn a goroutine which sends 20 unary RPCs to the test server. This + // will trigger the injection of custom backend metrics from the + // testServiceImpl. + const numRequests = 20 + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + testStub := testgrpc.NewTestServiceClient(cc) + errCh := make(chan error, 1) + go func() { + for i := 0; i < numRequests; i++ { + if _, err := testStub.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + errCh <- fmt.Errorf("UnaryCall failed: %v", err) + return + } + time.Sleep(time.Millisecond) + } + errCh <- nil + }() + + // Start the server streaming RPC to receive custom backend metrics. + oobStub := v3orcaservicegrpc.NewOpenRcaServiceClient(cc) + stream, err := oobStub.StreamCoreMetrics(ctx, &v3orcaservicepb.OrcaLoadReportRequest{ReportInterval: durationpb.New(shortReportingInterval)}) + if err != nil { + t.Fatalf("Failed to create a stream for out-of-band metrics") + } + + // Wait for the server to push metrics which indicate the completion of all + // the unary RPCs made from the above goroutine. 
+ for { + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for out-of-band custom backend metrics to match expected values") + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + default: + } + + wantProto := &v3orcapb.OrcaLoadReport{ + CpuUtilization: 50.0, + MemUtilization: 0.9, + ApplicationUtilization: 1.2, + Utilization: map[string]float64{requestsMetricKey: numRequests * 0.01}, + } + gotProto, err := stream.Recv() + if err != nil { + t.Fatalf("Recv() failed: %v", err) + } + if !cmp.Equal(gotProto, wantProto, cmp.Comparer(proto.Equal)) { + t.Logf("Received load report from stream: %s, want: %s", pretty.ToJSON(gotProto), pretty.ToJSON(wantProto)) + continue + } + // This means that we received the metrics which we expected. + break + } + + // The EmptyCall RPC is expected to delete earlier injected metrics. + if _, err := testStub.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall failed: %v", err) + } + // Wait for the server to push empty metrics which indicate the processing + // of the above EmptyCall RPC. + for { + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for out-of-band custom backend metrics to match expected values") + default: + } + + wantProto := &v3orcapb.OrcaLoadReport{} + gotProto, err := stream.Recv() + if err != nil { + t.Fatalf("Recv() failed: %v", err) + } + if !cmp.Equal(gotProto, wantProto, cmp.Comparer(proto.Equal)) { + t.Logf("Received load report from stream: %s, want: %s", pretty.ToJSON(gotProto), pretty.ToJSON(wantProto)) + continue + } + // This means that we received the metrics which we expected. 
+ break + } +} diff --git a/picker_wrapper.go b/picker_wrapper.go index a58174b6f436..02f975951242 100644 --- a/picker_wrapper.go +++ b/picker_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -35,6 +36,7 @@ import ( type pickerWrapper struct { mu sync.Mutex done bool + idle bool blockingCh chan struct{} picker balancer.Picker } @@ -46,7 +48,11 @@ func newPickerWrapper() *pickerWrapper { // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done { + if pw.done || pw.idle { + // There is a small window where a picker update from the LB policy can + // race with the channel going to idle mode. If the picker is idle here, + // it is because the channel asked it to do so, and therefore it is safe + // to ignore the update from the LB policy. pw.mu.Unlock() return } @@ -57,12 +63,16 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Unlock() } -func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. 
+func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac ac.incrCallsStarted() - return func(b balancer.DoneInfo) { + done := result.Done + result.Done = func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { @@ -81,7 +91,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { var ch chan struct{} var lastPickErr error @@ -89,7 +99,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. pw.mu.Lock() if pw.done { pw.mu.Unlock() - return nil, nil, ErrClientConnClosing + return nil, balancer.PickResult{}, ErrClientConnClosing } if pw.picker == nil { @@ -110,9 +120,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, nil, status.Error(codes.Canceled, errStr) + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -124,14 +134,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
pw.mu.Unlock() pickResult, err := p.Pick(info) - if err != nil { if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, err + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, balancer.PickResult{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -139,19 +152,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, nil, status.Error(codes.Unavailable, err.Error()) + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := pickResult.SubConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { - logger.Error("subconn returned from pick is not *acBalancerWrapper") + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, pickResult.Done), nil + doneChannelzWrapper(acbw, &pickResult) + return t, pickResult, nil } - return t, pickResult.Done, nil + return t, pickResult, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. 
@@ -175,3 +189,28 @@ func (pw *pickerWrapper) close() { pw.done = true close(pw.blockingCh) } + +func (pw *pickerWrapper) enterIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.idle = true +} + +func (pw *pickerWrapper) exitIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) + pw.idle = false +} + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/picker_wrapper_test.go b/picker_wrapper_test.go index 5f786b28580e..a4fae85d3975 100644 --- a/picker_wrapper_test.go +++ b/picker_wrapper_test.go @@ -97,7 +97,7 @@ func (s) TestBlockingPickNoSubAvailable(t *testing.T) { bp := newPickerWrapper() var finishedCount uint64 bp.updatePicker(&testingPicker{err: balancer.ErrNoSubConnAvailable, maxCalled: goroutineCount}) - // All goroutines should block because picker returns no sc available. + // All goroutines should block because picker returns no subConn available. for i := goroutineCount; i > 0; i-- { go func() { if tr, _, err := bp.pick(context.Background(), true, balancer.PickInfo{}); err != nil || tr != testT { @@ -138,7 +138,7 @@ func (s) TestBlockingPickSCNotReady(t *testing.T) { bp := newPickerWrapper() bp.updatePicker(&testingPicker{sc: testSCNotReady, maxCalled: goroutineCount}) var finishedCount uint64 - // All goroutines should block because sc is not ready. + // All goroutines should block because subConn is not ready. 
for i := goroutineCount; i > 0; i-- { go func() { if tr, _, err := bp.pick(context.Background(), true, balancer.PickInfo{}); err != nil || tr != testT { diff --git a/pickfirst.go b/pickfirst.go index b858c2a5e63b..abe266b021d2 100644 --- a/pickfirst.go +++ b/pickfirst.go @@ -19,11 +19,15 @@ package grpc import ( + "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/serviceconfig" ) // PickFirstBalancerName is the name of the pick_first balancer. @@ -43,94 +47,181 @@ func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &pfConfig{} + if err := json.Unmarshal(js, cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn + cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. 
- b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. 
+ b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if state.BalancerConfig != nil { + cfg, ok := state.BalancerConfig.(*pfConfig) + if !ok { + return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.cfg = cfg + } + + if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil } + + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: 
balancer.ErrNoSubConnAvailable}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { - case connectivity.Ready, connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. 
+ b.subConn.Connect() + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } + b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { } +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + type picker struct { result balancer.PickResult err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + subConn balancer.SubConn +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + func init() { balancer.Register(newPickfirstBuilder()) } diff --git a/pickfirst_test.go b/pickfirst_test.go deleted file mode 100644 index 9ece7844a355..000000000000 --- a/pickfirst_test.go +++ /dev/null @@ -1,348 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "math" - "sync" - "testing" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/status" -) - -func errorDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} - -func (s) TestOneBackendPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 1 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", - WithInsecure(), - WithResolvers(r), - WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}}}) - // The second RPC should succeed. 
- for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - return - } - time.Sleep(time.Millisecond) - } - t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) -} - -func (s) TestBackendsPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 2 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}}) - // The second RPC should succeed with the first server. - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - return - } - time.Sleep(time.Millisecond) - } - t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) -} - -func (s) TestNewAddressWhileBlockingPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 1 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. 
- ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - var wg sync.WaitGroup - for i := 0; i < 3; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // This RPC blocks until NewAddress is called. - cc.Invoke(context.Background(), "/foo/bar", &req, &reply) - }() - } - time.Sleep(50 * time.Millisecond) - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}}}) - wg.Wait() -} - -func (s) TestCloseWithPendingRPCPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 1 - _, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - var wg sync.WaitGroup - for i := 0; i < 3; i++ { - wg.Add(1) - go func() { - defer wg.Done() - // This RPC blocks until NewAddress is called. 
- cc.Invoke(context.Background(), "/foo/bar", &req, &reply) - }() - } - time.Sleep(50 * time.Millisecond) - cc.Close() - wg.Wait() -} - -func (s) TestOneServerDownPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 2 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}}) - // The second RPC should succeed with the first server. 
- for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - break - } - time.Sleep(time.Millisecond) - } - - servers[0].stop() - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { - return - } - time.Sleep(time.Millisecond) - } - t.Fatalf("EmptyCall() = _, %v, want _, %v", err, servers[0].port) -} - -func (s) TestAllServersDownPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 2 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}}}) - // The second RPC should succeed with the first server. 
- for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - break - } - time.Sleep(time.Millisecond) - } - - for i := 0; i < numServers; i++ { - servers[i].stop() - } - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); status.Code(err) == codes.Unavailable { - return - } - time.Sleep(time.Millisecond) - } - t.Fatalf("EmptyCall() = _, %v, want _, error with code unavailable", err) -} - -func (s) TestAddressesRemovedPickfirst(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - numServers := 3 - servers, scleanup := startServers(t, numServers, math.MaxInt32) - defer scleanup() - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r), WithCodec(testCodec{})) - if err != nil { - t.Fatalf("failed to dial: %v", err) - } - defer cc.Close() - // The first RPC should fail because there's no address. - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - req := "port" - var reply string - if err := cc.Invoke(ctx, "/foo/bar", &req, &reply); err == nil || status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("EmptyCall() = _, %v, want _, DeadlineExceeded", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}, {Addr: servers[1].addr}, {Addr: servers[2].addr}}}) - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - break - } - time.Sleep(time.Millisecond) - } - for i := 0; i < 20; i++ { - if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { - t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) - } - time.Sleep(10 * time.Millisecond) - } - - // Remove server[0]. 
- r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}}}) - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[1].port { - break - } - time.Sleep(time.Millisecond) - } - for i := 0; i < 20; i++ { - if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port { - t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) - } - time.Sleep(10 * time.Millisecond) - } - - // Append server[0], nothing should change. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[1].addr}, {Addr: servers[2].addr}, {Addr: servers[0].addr}}}) - for i := 0; i < 20; i++ { - if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[1].port { - t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 1, err, servers[1].port) - } - time.Sleep(10 * time.Millisecond) - } - - // Remove server[1]. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[2].addr}, {Addr: servers[0].addr}}}) - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[2].port { - break - } - time.Sleep(time.Millisecond) - } - for i := 0; i < 20; i++ { - if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[2].port { - t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 2, err, servers[2].port) - } - time.Sleep(10 * time.Millisecond) - } - - // Remove server[2]. 
- r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: servers[0].addr}}}) - for i := 0; i < 1000; i++ { - if err = cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err != nil && errorDesc(err) == servers[0].port { - break - } - time.Sleep(time.Millisecond) - } - for i := 0; i < 20; i++ { - if err := cc.Invoke(context.Background(), "/foo/bar", &req, &reply); err == nil || errorDesc(err) != servers[0].port { - t.Fatalf("Index %d: Invoke(_, _, _, _, _) = %v, want %s", 0, err, servers[0].port) - } - time.Sleep(10 * time.Millisecond) - } -} diff --git a/preloader.go b/preloader.go index 0a1e975ad916..cd45547854f0 100644 --- a/preloader.go +++ b/preloader.go @@ -25,7 +25,7 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/profiling/cmd/remote.go b/profiling/cmd/remote.go index b6adfd6a6bef..71c21c332b74 100644 --- a/profiling/cmd/remote.go +++ b/profiling/cmd/remote.go @@ -26,6 +26,7 @@ import ( "time" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" ppb "google.golang.org/grpc/profiling/proto" ) @@ -78,7 +79,7 @@ func remoteCommand() error { } logger.Infof("dialing %s", *flagAddress) - cc, err := grpc.Dial(*flagAddress, grpc.WithInsecure()) + cc, err := grpc.Dial(*flagAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { logger.Errorf("cannot dial %s: %v", *flagAddress, err) return err diff --git a/profiling/profiling.go b/profiling/profiling.go index 7112ef2e6a42..869054ea794a 100644 --- a/profiling/profiling.go +++ b/profiling/profiling.go @@ -18,7 +18,7 @@ // Package profiling exposes methods to manage profiling within gRPC. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. 
diff --git a/profiling/proto/service.pb.go b/profiling/proto/service.pb.go index 22bc4bc47f48..6ab6632c2198 100644 --- a/profiling/proto/service.pb.go +++ b/profiling/proto/service.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: profiling/proto/service.proto package proto import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // EnableRequest defines the fields in a /Profiling/Enable method request to // toggle profiling on and off within a gRPC program. type EnableRequest struct { diff --git a/profiling/proto/service_grpc.pb.go b/profiling/proto/service_grpc.pb.go index bfdcc69bffb8..5d696a26f924 100644 --- a/profiling/proto/service_grpc.pb.go +++ b/profiling/proto/service_grpc.pb.go @@ -1,4 +1,22 @@ +// Copyright 2019 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: profiling/proto/service.proto package proto @@ -14,6 +32,11 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Profiling_Enable_FullMethodName = "/grpc.go.profiling.v1alpha.Profiling/Enable" + Profiling_GetStreamStats_FullMethodName = "/grpc.go.profiling.v1alpha.Profiling/GetStreamStats" +) + // ProfilingClient is the client API for Profiling service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -35,7 +58,7 @@ func NewProfilingClient(cc grpc.ClientConnInterface) ProfilingClient { func (c *profilingClient) Enable(ctx context.Context, in *EnableRequest, opts ...grpc.CallOption) (*EnableResponse, error) { out := new(EnableResponse) - err := c.cc.Invoke(ctx, "/grpc.go.profiling.v1alpha.Profiling/Enable", in, out, opts...) + err := c.cc.Invoke(ctx, Profiling_Enable_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -44,7 +67,7 @@ func (c *profilingClient) Enable(ctx context.Context, in *EnableRequest, opts .. func (c *profilingClient) GetStreamStats(ctx context.Context, in *GetStreamStatsRequest, opts ...grpc.CallOption) (*GetStreamStatsResponse, error) { out := new(GetStreamStatsResponse) - err := c.cc.Invoke(ctx, "/grpc.go.profiling.v1alpha.Profiling/GetStreamStats", in, out, opts...) + err := c.cc.Invoke(ctx, Profiling_GetStreamStats_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -94,7 +117,7 @@ func _Profiling_Enable_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.go.profiling.v1alpha.Profiling/Enable", + FullMethod: Profiling_Enable_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProfilingServer).Enable(ctx, req.(*EnableRequest)) @@ -112,7 +135,7 @@ func _Profiling_GetStreamStats_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.go.profiling.v1alpha.Profiling/GetStreamStats", + FullMethod: Profiling_GetStreamStats_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProfilingServer).GetStreamStats(ctx, req.(*GetStreamStatsRequest)) diff --git a/profiling/service/service.go b/profiling/service/service.go index 5b034372842e..c0234987392c 100644 --- a/profiling/service/service.go +++ b/profiling/service/service.go @@ -21,7 +21,7 @@ // queried by a client to remotely manage the gRPC profiling behaviour of an // application. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/reflection/adapt.go b/reflection/adapt.go new file mode 100644 index 000000000000..33b907a36da4 --- /dev/null +++ b/reflection/adapt.go @@ -0,0 +1,187 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reflection + +import ( + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" +) + +// asV1Alpha returns an implementation of the v1alpha version of the reflection +// interface that delegates all calls to the given v1 version. +func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { + return v1AlphaServerImpl{svr: svr} +} + +type v1AlphaServerImpl struct { + svr v1reflectiongrpc.ServerReflectionServer +} + +func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { + return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) +} + +type v1AlphaServerStreamAdapter struct { + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer +} + +func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { + return s.ServerReflection_ServerReflectionInfoServer.Send(v1ToV1AlphaResponse(response)) +} + +func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { + resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() + if err != nil { + return nil, err + } + return v1AlphaToV1Request(resp), nil +} + +func v1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { + var v1alpha v1alphareflectionpb.ServerReflectionResponse + v1alpha.ValidHost = v1.ValidHost + if v1.OriginalRequest != nil { + v1alpha.OriginalRequest = v1ToV1AlphaRequest(v1.OriginalRequest) + } + switch mr := v1.MessageResponse.(type) { + case 
*v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range mr.ListServicesResponse.GetService() { + svcs[i] = &v1alphareflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1alphareflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1alpha +} + +func v1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { + var v1 v1reflectionpb.ServerReflectionRequest + v1.Host = v1alpha.Host + switch mr := v1alpha.MessageRequest.(type) { + case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: + 
v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr.FileContainingExtension != nil { + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + case *v1alphareflectionpb.ServerReflectionRequest_ListServices: + v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + default: + // no value set + } + return &v1 +} + +func v1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { + var v1alpha v1alphareflectionpb.ServerReflectionRequest + v1alpha.Host = v1.Host + switch mr := v1.MessageRequest.(type) { + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ + FileByFilename: mr.FileByFilename, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ + FileContainingSymbol: mr.FileContainingSymbol, + } + } + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: + if mr != nil { + 
v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ + ContainingType: mr.FileContainingExtension.GetContainingType(), + ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), + }, + } + } + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, + } + } + case *v1reflectionpb.ServerReflectionRequest_ListServices: + if mr != nil { + v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ + ListServices: mr.ListServices, + } + } + default: + // no value set + } + return &v1alpha +} diff --git a/reflection/grpc_reflection_v1/reflection.pb.go b/reflection/grpc_reflection_v1/reflection.pb.go new file mode 100644 index 000000000000..ececdb89c977 --- /dev/null +++ b/reflection/grpc_reflection_v1/reflection.pb.go @@ -0,0 +1,953 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. 
A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. 
+ // + // Types that are assignable to MessageRequest: + // + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` +} + +func (x *ServerReflectionRequest) Reset() { + *x = ServerReflectionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionRequest) ProtoMessage() {} + +func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} +} + +func (x *ServerReflectionRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (x *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (x *ServerReflectionRequest) GetListServices() string { + if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + // Find a proto file by the file name. + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] 
or .). + FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + // Find the proto file which defines an extension extending the given + // message type with the given field number. + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + // Finds the tag numbers used by all known extensions of the given message + // type, and appends them to ExtensionNumberResponse in an undefined order. + // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // . + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + // List the full names of registered services. The content will not be + // checked. 
+ ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Fully-qualified type name. The format should be . + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionRequest) Reset() { + *x = ExtensionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRequest) ProtoMessage() {} + +func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. 
+func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionRequest) GetContainingType() string { + if x != nil { + return x.ContainingType + } + return "" +} + +func (x *ExtensionRequest) GetExtensionNumber() int32 { + if x != nil { + return x.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the message_request + // in the request. + // + // Types that are assignable to MessageResponse: + // + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` +} + +func (x *ServerReflectionResponse) Reset() { + *x = ServerReflectionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerReflectionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerReflectionResponse) ProtoMessage() {} + +func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerReflectionResponse) GetValidHost() string { + if x != nil { + return x.ValidHost + } + return "" +} + +func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if x != nil { + return x.OriginalRequest + } + return nil +} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with 
transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + // This message is used to answer all_extension_numbers_of_type requests. + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + // This message is used to answer list_services requests. + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + // This message is used when an error occurs. + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. 
+type FileDescriptorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` +} + +func (x *FileDescriptorResponse) Reset() { + *x = FileDescriptorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorResponse) ProtoMessage() {} + +func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} +} + +func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if x != nil { + return x.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of the base type, including the package name. The format + // is . 
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` +} + +func (x *ExtensionNumberResponse) Reset() { + *x = ExtensionNumberResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionNumberResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionNumberResponse) ProtoMessage() {} + +func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} +} + +func (x *ExtensionNumberResponse) GetBaseTypeName() string { + if x != nil { + return x.BaseTypeName + } + return "" +} + +func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if x != nil { + return x.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. 
+ Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` +} + +func (x *ListServiceResponse) Reset() { + *x = ListServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceResponse) ProtoMessage() {} + +func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} +} + +func (x *ListServiceResponse) GetService() []*ServiceResponse { + if x != nil { + return x.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Full name of a registered service, including its package name. The format + // is . 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ServiceResponse) Reset() { + *x = ServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceResponse) ProtoMessage() {} + +func (x *ServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} +} + +func (x *ServiceResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This field uses the error codes defined in grpc::StatusCode. 
+ ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ErrorResponse) Reset() { + *x = ErrorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorResponse) ProtoMessage() {} + +func (x *ErrorResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
+func (*ErrorResponse) Descriptor() ([]byte, []int) { + return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorResponse) GetErrorCode() int32 { + if x != nil { + return x.ErrorCode + } + return 0 +} + +func (x *ErrorResponse) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, + 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 
0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, + 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, + 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 
0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, + 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, + 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 
0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, + 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc +) + +func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + }) + return file_grpc_reflection_v1_reflection_proto_rawDescData +} + +var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ + (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest + (*ExtensionRequest)(nil), // 1: 
grpc.reflection.v1.ExtensionRequest + (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse + (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse + (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse + (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse + (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse + (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse +} +var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ + 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest + 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest + 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse + 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse + 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse + 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse + 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse + 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest + 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse + 8, // [8:9] is the sub-list for method output_type + 7, // [7:8] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { 
file_grpc_reflection_v1_reflection_proto_init() } +func file_grpc_reflection_v1_reflection_proto_init() { + if File_grpc_reflection_v1_reflection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerReflectionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionNumberResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { 
+ switch v := v.(*ServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } + file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, + }.Build() + File_grpc_reflection_v1_reflection_proto = out.File + file_grpc_reflection_v1_reflection_proto_rawDesc = nil + file_grpc_reflection_v1_reflection_proto_goTypes = nil + file_grpc_reflection_v1_reflection_proto_depIdxs = nil +} diff --git a/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/reflection/grpc_reflection_v1/reflection_grpc.pb.go new file mode 100644 index 
000000000000..62b56a8be0e6 --- /dev/null +++ b/reflection/grpc_reflection_v1/reflection_grpc.pb.go @@ -0,0 +1,164 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection. A more complete description of how +// server reflection works can be found at +// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md +// +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: grpc/reflection/v1/reflection.proto + +package grpc_reflection_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" +) + +// ServerReflectionClient is the client API for ServerReflection service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. 
+// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. +type UnimplementedServerReflectionServer struct { +} + +func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ServerReflectionServer will +// result in compilation errors. +type UnsafeServerReflectionServer interface { + mustEmbedUnimplementedServerReflectionServer() +} + +func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { + s.RegisterService(&ServerReflection_ServiceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != 
nil { + return nil, err + } + return m, nil +} + +// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ServerReflection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/reflection/v1/reflection.proto", +} diff --git a/reflection/grpc_reflection_v1alpha/reflection.pb.go b/reflection/grpc_reflection_v1alpha/reflection.pb.go index 1f859f764881..d54c07676d5b 100644 --- a/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -1,4 +1,4 @@ -// Copyright 2016 gRPC authors. +// Copyright 2016 The gRPC Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,19 +11,20 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - // Service exported by server reflection +// Warning: this entire file is deprecated. Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: reflection/grpc_reflection_v1alpha/reflection.proto +// protoc-gen-go v1.30.0 +// protoc v4.22.0 +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. 
package grpc_reflection_v1alpha import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -37,22 +38,22 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The message sent by the client when calling ServerReflectionInfo method. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // To use reflection service, the client should set one of the following // fields in message_request. The server distinguishes requests by their // defined field and then handles them using corresponding methods. 
// // Types that are assignable to MessageRequest: + // // *ServerReflectionRequest_FileByFilename // *ServerReflectionRequest_FileContainingSymbol // *ServerReflectionRequest_FileContainingExtension @@ -64,7 +65,7 @@ type ServerReflectionRequest struct { func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -77,7 +78,7 @@ func (x *ServerReflectionRequest) String() string { func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -90,9 +91,10 @@ func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetHost() string { if x != nil { return x.Host @@ -107,6 +109,7 @@ func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_ return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionRequest) GetFileByFilename() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { return x.FileByFilename @@ -114,6 +117,7 @@ func (x *ServerReflectionRequest) GetFileByFilename() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileContainingSymbol() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { return x.FileContainingSymbol @@ -121,6 +125,7 @@ func (x *ServerReflectionRequest) GetFileContainingSymbol() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { return x.FileContainingExtension @@ -128,6 +133,7 @@ func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { return x.AllExtensionNumbersOfType @@ -135,6 +141,7 @@ func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetListServices() string { if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { return x.ListServices @@ -148,6 +155,8 @@ type isServerReflectionRequest_MessageRequest interface { type ServerReflectionRequest_FileByFilename struct { // Find a proto file by the file name. 
+ // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` } @@ -155,12 +164,16 @@ type ServerReflectionRequest_FileContainingSymbol struct { // Find the proto file that declares the given fully-qualified symbol name. // This field should be a fully-qualified symbol name // (e.g. .[.] or .). + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` } type ServerReflectionRequest_FileContainingExtension struct { // Find the proto file which defines an extension extending the given // message type with the given field number. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` } @@ -173,12 +186,16 @@ type ServerReflectionRequest_AllExtensionNumbersOfType struct { // StatusCode::UNIMPLEMENTED if it's not implemented. // This field should be a fully-qualified type name. The format is // . + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` } type ServerReflectionRequest_ListServices struct { // List the full names of registered services. The content will not be // checked. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` } @@ -195,20 +212,25 @@ func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRe // The type name and extension number sent by the client when requesting // file_containing_extension. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Fully-qualified type name. The format should be . - ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` - ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
+ ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` } func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -221,7 +243,7 @@ func (x *ExtensionRequest) String() string { func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -234,9 +256,10 @@ func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionRequest) GetContainingType() string { if x != nil { return x.ContainingType @@ -244,6 +267,7 @@ func (x *ExtensionRequest) GetContainingType() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionRequest) GetExtensionNumber() int32 { if x != nil { return x.ExtensionNumber @@ -252,17 +276,22 @@ func (x *ExtensionRequest) GetExtensionNumber() int32 { } // The message sent by the server to answer ServerReflectionInfo method. 
+// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` - // The server sets one of the following fields according to the - // message_request in the request. + // The server set one of the following fields according to the message_request + // in the request. 
// // Types that are assignable to MessageResponse: + // // *ServerReflectionResponse_FileDescriptorResponse // *ServerReflectionResponse_AllExtensionNumbersResponse // *ServerReflectionResponse_ListServicesResponse @@ -273,7 +302,7 @@ type ServerReflectionResponse struct { func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -286,7 +315,7 @@ func (x *ServerReflectionResponse) String() string { func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -299,9 +328,10 @@ func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetValidHost() string { if x != nil { return x.ValidHost @@ -309,6 +339,7 @@ func (x *ServerReflectionResponse) GetValidHost() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { if x != nil { return x.OriginalRequest @@ -323,6 +354,7 @@ func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionRespon return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { return x.FileDescriptorResponse @@ -330,6 +362,7 @@ func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorRe return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { return x.AllExtensionNumbersResponse @@ -337,6 +370,7 @@ func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNu return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { return x.ListServicesResponse @@ -344,6 +378,7 @@ func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceRespons return nil } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { return x.ErrorResponse @@ -357,26 +392,34 @@ type isServerReflectionResponse_MessageResponse interface { type ServerReflectionResponse_FileDescriptorResponse struct { // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. - // As the repeated label is not allowed in oneof fields, we use a + // file_containing_extension requests with transitive dependencies. As + // the repeated label is not allowed in oneof fields, we use a // FileDescriptorResponse message to encapsulate the repeated fields. // The reflection service is allowed to avoid sending FileDescriptorProtos // that were previously sent in response to earlier requests in the stream. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` } type ServerReflectionResponse_AllExtensionNumbersResponse struct { - // This message is used to answer all_extension_numbers_of_type requests. + // This message is used to answer all_extension_numbers_of_type requst. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` } type ServerReflectionResponse_ListServicesResponse struct { - // This message is used to answer list_services requests. + // This message is used to answer list_services request. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` } type ServerReflectionResponse_ErrorResponse struct { // This message is used when an error occurs. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` } @@ -393,6 +436,8 @@ func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_Messag // Serialized FileDescriptorProto messages sent by the server answering // a file_by_filename, file_containing_symbol, or file_containing_extension // request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type FileDescriptorResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -401,13 +446,15 @@ type FileDescriptorResponse struct { // Serialized FileDescriptorProto messages. We avoid taking a dependency on // descriptor.proto, which uses proto2 only features, by making them opaque // bytes instead. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` } func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -420,7 +467,7 @@ func (x *FileDescriptorResponse) String() string { func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -433,9 +480,10 @@ func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { if x != nil { return x.FileDescriptorProto @@ -445,6 +493,8 @@ func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { // A list of extension numbers sent by the server answering // all_extension_numbers_of_type request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
type ExtensionNumberResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -452,14 +502,17 @@ type ExtensionNumberResponse struct { // Full name of the base type, including the package name. The format // is . - BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` } func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -472,7 +525,7 @@ func (x *ExtensionNumberResponse) String() string { func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -485,9 +538,10 @@ func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. 
func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionNumberResponse) GetBaseTypeName() string { if x != nil { return x.BaseTypeName @@ -495,6 +549,7 @@ func (x *ExtensionNumberResponse) GetBaseTypeName() string { return "" } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { if x != nil { return x.ExtensionNumber @@ -503,6 +558,8 @@ func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { } // A list of ServiceResponse sent by the server answering list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ListServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -510,13 +567,15 @@ type ListServiceResponse struct { // The information of each service may be expanded in the future, so we use // ServiceResponse message to encapsulate it. + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` } func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -529,7 +588,7 @@ func (x *ListServiceResponse) String() string { func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -542,9 +601,10 @@ func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ListServiceResponse) GetService() []*ServiceResponse { if x != nil { return x.Service @@ -554,6 +614,8 @@ func (x *ListServiceResponse) GetService() []*ServiceResponse { // The information of a single service used by ListServiceResponse to answer // list_services request. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -561,13 +623,15 @@ type ServiceResponse struct { // Full name of a registered service, including its package name. The format // is . 
+ // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } func (x *ServiceResponse) Reset() { *x = ServiceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -580,7 +644,7 @@ func (x *ServiceResponse) String() string { func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -593,9 +657,10 @@ func (x *ServiceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. func (*ServiceResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServiceResponse) GetName() string { if x != nil { return x.Name @@ -604,20 +669,25 @@ func (x *ServiceResponse) GetName() string { } // The error code and error message sent by the server when an error occurs. +// +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ErrorResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // This field uses the error codes defined in grpc::StatusCode. 
- ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` } func (x *ErrorResponse) Reset() { *x = ErrorResponse{} if protoimpl.UnsafeEnabled { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -630,7 +700,7 @@ func (x *ErrorResponse) String() string { func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { - mi := &file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -643,9 +713,10 @@ func (x *ErrorResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. func (*ErrorResponse) Descriptor() ([]byte, []int) { - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} + return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ErrorResponse) GetErrorCode() int32 { if x != nil { return x.ErrorCode @@ -653,6 +724,7 @@ func (x *ErrorResponse) GetErrorCode() int32 { return 0 } +// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ErrorResponse) GetErrorMessage() string { if x != nil { return x.ErrorMessage @@ -660,136 +732,139 @@ func (x *ErrorResponse) GetErrorMessage() string { return "" } -var File_reflection_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor - -var file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, - 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x22, 0xf8, - 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, - 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, - 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, - 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, - 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, - 0x6f, 0x6c, 0x12, 
0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, - 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, - 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 
0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x5b, 0x0a, - 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, - 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6b, 0x0a, 0x18, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, +var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor + +var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, + 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 
0x0a, 0x0f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, + 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, + 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, - 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, - 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x30, 0x2e, 0x67, 
0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, - 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 
0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, + 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 
0x65, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, + 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, - 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 
0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, - 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x93, 0x01, 0x0a, - 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 
0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, + 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, - 0x30, 0x01, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, + 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc ) -func file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData) +func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { + file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) }) - return file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDescData + return file_grpc_reflection_v1alpha_reflection_proto_rawDescData } -var file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ +var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ 
(*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse @@ -799,7 +874,7 @@ var file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = []interfa (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse } -var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ +var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse @@ -816,13 +891,13 @@ var file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ 0, // [0:7] is the sub-list for field type_name } -func init() { file_reflection_grpc_reflection_v1alpha_reflection_proto_init() } -func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { - if File_reflection_grpc_reflection_v1alpha_reflection_proto != nil { +func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } +func file_grpc_reflection_v1alpha_reflection_proto_init() { + if File_grpc_reflection_v1alpha_reflection_proto != nil { return } if !protoimpl.UnsafeEnabled { - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionRequest); i { case 0: return &v.state @@ -834,7 +909,7 @@ func 
file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionRequest); i { case 0: return &v.state @@ -846,7 +921,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServerReflectionResponse); i { case 0: return &v.state @@ -858,7 +933,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FileDescriptorResponse); i { case 0: return &v.state @@ -870,7 +945,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExtensionNumberResponse); i { case 0: return &v.state @@ -882,7 +957,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListServiceResponse); i { case 
0: return &v.state @@ -894,7 +969,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ServiceResponse); i { case 0: return &v.state @@ -906,7 +981,7 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { return nil } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ErrorResponse); i { case 0: return &v.state @@ -919,14 +994,14 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { } } } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), (*ServerReflectionRequest_FileContainingExtension)(nil), (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), (*ServerReflectionRequest_ListServices)(nil), } - file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ (*ServerReflectionResponse_FileDescriptorResponse)(nil), (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), (*ServerReflectionResponse_ListServicesResponse)(nil), @@ -936,18 +1011,18 @@ func file_reflection_grpc_reflection_v1alpha_reflection_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: 
file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc, + RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, NumEnums: 0, NumMessages: 8, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes, - DependencyIndexes: file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs, - MessageInfos: file_reflection_grpc_reflection_v1alpha_reflection_proto_msgTypes, + GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, + DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs, + MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, }.Build() - File_reflection_grpc_reflection_v1alpha_reflection_proto = out.File - file_reflection_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil - file_reflection_grpc_reflection_v1alpha_reflection_proto_goTypes = nil - file_reflection_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil + File_grpc_reflection_v1alpha_reflection_proto = out.File + file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil + file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil + file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil } diff --git a/reflection/grpc_reflection_v1alpha/reflection.proto b/reflection/grpc_reflection_v1alpha/reflection.proto deleted file mode 100644 index ee2b82c0a5b3..000000000000 --- a/reflection/grpc_reflection_v1alpha/reflection.proto +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Service exported by server reflection - -syntax = "proto3"; - -option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"; - -package grpc.reflection.v1alpha; - -service ServerReflection { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - rpc ServerReflectionInfo(stream ServerReflectionRequest) - returns (stream ServerReflectionResponse); -} - -// The message sent by the client when calling ServerReflectionInfo method. -message ServerReflectionRequest { - string host = 1; - // To use reflection service, the client should set one of the following - // fields in message_request. The server distinguishes requests by their - // defined field and then handles them using corresponding methods. - oneof message_request { - // Find a proto file by the file name. - string file_by_filename = 3; - - // Find the proto file that declares the given fully-qualified symbol name. - // This field should be a fully-qualified symbol name - // (e.g. .[.] or .). - string file_containing_symbol = 4; - - // Find the proto file which defines an extension extending the given - // message type with the given field number. - ExtensionRequest file_containing_extension = 5; - - // Finds the tag numbers used by all known extensions of extendee_type, and - // appends them to ExtensionNumberResponse in an undefined order. - // Its corresponding method is best-effort: it's not guaranteed that the - // reflection service will implement this method, and it's not guaranteed - // that this method will provide all extensions. Returns - // StatusCode::UNIMPLEMENTED if it's not implemented. - // This field should be a fully-qualified type name. The format is - // . - string all_extension_numbers_of_type = 6; - - // List the full names of registered services. The content will not be - // checked. 
- string list_services = 7; - } -} - -// The type name and extension number sent by the client when requesting -// file_containing_extension. -message ExtensionRequest { - // Fully-qualified type name. The format should be . - string containing_type = 1; - int32 extension_number = 2; -} - -// The message sent by the server to answer ServerReflectionInfo method. -message ServerReflectionResponse { - string valid_host = 1; - ServerReflectionRequest original_request = 2; - // The server sets one of the following fields according to the - // message_request in the request. - oneof message_response { - // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. - // As the repeated label is not allowed in oneof fields, we use a - // FileDescriptorResponse message to encapsulate the repeated fields. - // The reflection service is allowed to avoid sending FileDescriptorProtos - // that were previously sent in response to earlier requests in the stream. - FileDescriptorResponse file_descriptor_response = 4; - - // This message is used to answer all_extension_numbers_of_type requests. - ExtensionNumberResponse all_extension_numbers_response = 5; - - // This message is used to answer list_services requests. - ListServiceResponse list_services_response = 6; - - // This message is used when an error occurs. - ErrorResponse error_response = 7; - } -} - -// Serialized FileDescriptorProto messages sent by the server answering -// a file_by_filename, file_containing_symbol, or file_containing_extension -// request. -message FileDescriptorResponse { - // Serialized FileDescriptorProto messages. We avoid taking a dependency on - // descriptor.proto, which uses proto2 only features, by making them opaque - // bytes instead. - repeated bytes file_descriptor_proto = 1; -} - -// A list of extension numbers sent by the server answering -// all_extension_numbers_of_type request. 
-message ExtensionNumberResponse { - // Full name of the base type, including the package name. The format - // is . - string base_type_name = 1; - repeated int32 extension_number = 2; -} - -// A list of ServiceResponse sent by the server answering list_services request. -message ListServiceResponse { - // The information of each service may be expanded in the future, so we use - // ServiceResponse message to encapsulate it. - repeated ServiceResponse service = 1; -} - -// The information of a single service used by ListServiceResponse to answer -// list_services request. -message ServiceResponse { - // Full name of a registered service, including its package name. The format - // is . - string name = 1; -} - -// The error code and error message sent by the server when an error occurs. -message ErrorResponse { - // This field uses the error codes defined in grpc::StatusCode. - int32 error_code = 1; - string error_message = 2; -} diff --git a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go index c2b7429a06b0..367a029be6b3 100644 --- a/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ b/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -1,4 +1,26 @@ +// Copyright 2016 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// Service exported by server reflection + +// Warning: this entire file is deprecated. 
Use this instead: +// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// grpc/reflection/v1alpha/reflection.proto is a deprecated file. package grpc_reflection_v1alpha @@ -14,6 +36,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" +) + // ServerReflectionClient is the client API for ServerReflection service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -32,7 +58,7 @@ func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClie } func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) + stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, opts...) if err != nil { return nil, err } @@ -131,5 +157,5 @@ var ServerReflection_ServiceDesc = grpc.ServiceDesc{ ClientStreams: true, }, }, - Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", + Metadata: "grpc/reflection/v1alpha/reflection.proto", } diff --git a/reflection/grpc_testing/proto2.pb.go b/reflection/grpc_testing/proto2.pb.go index 9a8f643adb17..b4471a86c5ef 100644 --- a/reflection/grpc_testing/proto2.pb.go +++ b/reflection/grpc_testing/proto2.pb.go @@ -14,16 +14,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: reflection/grpc_testing/proto2.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoiface "google.golang.org/protobuf/runtime/protoiface" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" @@ -36,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type ToBeExtended struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -81,15 +75,6 @@ func (*ToBeExtended) Descriptor() ([]byte, []int) { return file_reflection_grpc_testing_proto2_proto_rawDescGZIP(), []int{0} } -var extRange_ToBeExtended = []protoiface.ExtensionRangeV1{ - {Start: 10, End: 30}, -} - -// Deprecated: Use ToBeExtended.ProtoReflect.Descriptor.ExtensionRanges instead. -func (*ToBeExtended) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { - return extRange_ToBeExtended -} - func (x *ToBeExtended) GetFoo() int32 { if x != nil && x.Foo != nil { return *x.Foo diff --git a/reflection/grpc_testing/proto2_ext.pb.go b/reflection/grpc_testing/proto2_ext.pb.go index 4fe2b2a17d86..0b2147df6b2d 100644 --- a/reflection/grpc_testing/proto2_ext.pb.go +++ b/reflection/grpc_testing/proto2_ext.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: reflection/grpc_testing/proto2_ext.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type Extension struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/reflection/grpc_testing/proto2_ext2.pb.go b/reflection/grpc_testing/proto2_ext2.pb.go index e84c44f22c96..8776f65e2872 100644 --- a/reflection/grpc_testing/proto2_ext2.pb.go +++ b/reflection/grpc_testing/proto2_ext2.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: reflection/grpc_testing/proto2_ext2.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - type AnotherExtension struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/reflection/grpc_testing/test.pb.go b/reflection/grpc_testing/test.pb.go index 6740b629d767..6c9cd21f8f50 100644 --- a/reflection/grpc_testing/test.pb.go +++ b/reflection/grpc_testing/test.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: reflection/grpc_testing/test.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type SearchResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/reflection/grpc_testing/test_grpc.pb.go b/reflection/grpc_testing/test_grpc.pb.go index 76ec8d52d684..d130f6f3dae3 100644 --- a/reflection/grpc_testing/test_grpc.pb.go +++ b/reflection/grpc_testing/test_grpc.pb.go @@ -1,4 +1,22 @@ +// Copyright 2017 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: reflection/grpc_testing/test.proto package grpc_testing @@ -14,6 +32,11 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + SearchService_Search_FullMethodName = "/grpc.testing.SearchService/Search" + SearchService_StreamingSearch_FullMethodName = "/grpc.testing.SearchService/StreamingSearch" +) + // SearchServiceClient is the client API for SearchService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -32,7 +55,7 @@ func NewSearchServiceClient(cc grpc.ClientConnInterface) SearchServiceClient { func (c *searchServiceClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { out := new(SearchResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.SearchService/Search", in, out, opts...) + err := c.cc.Invoke(ctx, SearchService_Search_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -40,7 +63,7 @@ func (c *searchServiceClient) Search(ctx context.Context, in *SearchRequest, opt } func (c *searchServiceClient) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) { - stream, err := c.cc.NewStream(ctx, &SearchService_ServiceDesc.Streams[0], "/grpc.testing.SearchService/StreamingSearch", opts...) + stream, err := c.cc.NewStream(ctx, &SearchService_ServiceDesc.Streams[0], SearchService_StreamingSearch_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -112,7 +135,7 @@ func _SearchService_Search_Handler(srv interface{}, ctx context.Context, dec fun } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.SearchService/Search", + FullMethod: SearchService_Search_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(SearchServiceServer).Search(ctx, req.(*SearchRequest)) diff --git a/reflection/grpc_testing_not_regenerate/README.md b/reflection/grpc_testing_not_regenerate/README.md new file mode 100644 index 000000000000..7f29ff61a537 --- /dev/null +++ b/reflection/grpc_testing_not_regenerate/README.md @@ -0,0 +1,5 @@ +testv3.pb.go is generated with an older version of codegen, to test reflection behavior with `grpc.SupportPackageIsVersion3`. DO NOT REGENERATE! + +testv3.pb.go is manually edited to replace `"golang.org/x/net/context"` with `"context"`. + +dynamic.pb.go is generated with the latest protoc and manually edited to remove everything except the descriptor bytes var, which is renamed and exported. \ No newline at end of file diff --git a/reflection/grpc_testing_not_regenerate/dynamic.pb.go b/reflection/grpc_testing_not_regenerate/dynamic.pb.go new file mode 100644 index 000000000000..35e4f02478b2 --- /dev/null +++ b/reflection/grpc_testing_not_regenerate/dynamic.pb.go @@ -0,0 +1,35 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc_testing_not_regenerate + +// FileDynamicProtoRawDesc is the descriptor for dynamic.proto, see README.md. +var FileDynamicProtoRawDesc = []byte{ + 0x0a, 0x0d, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0c, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x22, 0x0c, 0x0a, + 0x0a, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x22, 0x0c, 0x0a, 0x0a, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x71, 0x32, 0x57, 0x0a, 0x0e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x0f, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x31, 0x12, 0x18, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, + 0x65, 0x73, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x64, 0x61, 0x74, 0x61, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} diff --git a/reflection/grpc_testing_not_regenerate/dynamic.proto b/reflection/grpc_testing_not_regenerate/dynamic.proto new file mode 100644 index 000000000000..5eeba0892336 --- /dev/null +++ b/reflection/grpc_testing_not_regenerate/dynamic.proto @@ -0,0 +1,33 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +syntax = "proto3"; + +option go_package = "google.golang.org/grpc/reflection/grpc_testing_not_regenerate"; + +package grpc.testing; + +message DynamicRes {} + +message DynamicReq {} + +// DynamicService is used to test reflection on dynamically constructed protocol +// buffer messages. +service DynamicService { + // DynamicMessage1 is a test RPC for dynamically constructed protobufs. + rpc DynamicMessage1(DynamicReq) returns (DynamicRes); +} \ No newline at end of file diff --git a/reflection/grpc_testingv3/testv3.pb.go b/reflection/grpc_testing_not_regenerate/testv3.pb.go similarity index 95% rename from reflection/grpc_testingv3/testv3.pb.go rename to reflection/grpc_testing_not_regenerate/testv3.pb.go index d7a69e546ead..8a690963ec10 100644 --- a/reflection/grpc_testingv3/testv3.pb.go +++ b/reflection/grpc_testing_not_regenerate/testv3.pb.go @@ -1,26 +1,44 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + // Code generated by protoc-gen-go. 
// source: testv3.proto // DO NOT EDIT! /* -Package grpc_testingv3 is a generated protocol buffer package. +Package grpc_testing_not_regenerate is a generated protocol buffer package. It is generated from these files: + testv3.proto It has these top-level messages: + SearchResponseV3 SearchRequestV3 */ -package grpc_testingv3 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package grpc_testing_not_regenerate import ( context "context" + fmt "fmt" + math "math" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" ) diff --git a/reflection/grpc_testing_not_regenerate/testv3.proto b/reflection/grpc_testing_not_regenerate/testv3.proto new file mode 100644 index 000000000000..44f93ba8b076 --- /dev/null +++ b/reflection/grpc_testing_not_regenerate/testv3.proto @@ -0,0 +1,58 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +syntax = "proto3"; + +option go_package = "google.golang.org/grpc/reflection/grpc_testing_not_regenerate"; + +package grpc.testingv3; + +message SearchResponseV3 { + message Result { + string url = 1; + string title = 2; + repeated string snippets = 3; + message Value { + oneof val { + string str = 1; + int64 int = 2; + double real = 3; + } + } + map metadata = 4; + } + enum State { + UNKNOWN = 0; + FRESH = 1; + STALE = 2; + } + repeated Result results = 1; + State state = 2; +} + +message SearchRequestV3 { + string query = 1; +} + +// SearchServiceV3 is used to test grpc server reflection. +service SearchServiceV3 { + // Search is a unary RPC. + rpc Search(SearchRequestV3) returns (SearchResponseV3); + + // StreamingSearch is a streaming RPC. + rpc StreamingSearch(stream SearchRequestV3) returns (stream SearchResponseV3); +} diff --git a/reflection/grpc_testingv3/README.md b/reflection/grpc_testingv3/README.md deleted file mode 100644 index 83d58756a440..000000000000 --- a/reflection/grpc_testingv3/README.md +++ /dev/null @@ -1,3 +0,0 @@ -The pb.go is genenated with an older version of codegen, to test reflection behavior with `grpc.SupportPackageIsVersion3`. DO NOT REGENERATE! - -pb.go is manually edited to replace `"golang.org/x/net/context"` with `"context"`. 
diff --git a/reflection/grpc_testingv3/testv3.proto b/reflection/grpc_testingv3/testv3.proto deleted file mode 100644 index 38a615a90d91..000000000000 --- a/reflection/grpc_testingv3/testv3.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -option go_package = "google.golang.org/grpc/reflection/grpc_testingv3"; - -package grpc.testingv3; - -message SearchResponseV3 { - message Result { - string url = 1; - string title = 2; - repeated string snippets = 3; - message Value { - oneof val { - string str = 1; - int64 int = 2; - double real = 3; - } - } - map metadata = 4; - } - enum State { - UNKNOWN = 0; - FRESH = 1; - STALE = 2; - } - repeated Result results = 1; - State state = 2; -} - -message SearchRequestV3 { - string query = 1; -} - -service SearchServiceV3 { - rpc Search(SearchRequestV3) returns (SearchResponseV3); - rpc StreamingSearch(stream SearchRequestV3) returns (stream SearchResponseV3); -} diff --git a/reflection/serverreflection.go b/reflection/serverreflection.go index d2696168b10c..76dae09d8886 100644 --- a/reflection/serverreflection.go +++ b/reflection/serverreflection.go @@ -23,6 +23,7 @@ The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. 
To register server reflection on a gRPC server: + import "google.golang.org/grpc/reflection" s := grpc.NewServer() @@ -32,360 +33,227 @@ To register server reflection on a gRPC server: reflection.Register(s) s.Serve(lis) - */ package reflection // import "google.golang.org/grpc/reflection" import ( - "bytes" - "compress/gzip" - "fmt" "io" - "io/ioutil" - "reflect" "sort" - "sync" - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" "google.golang.org/grpc/codes" - rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" ) -type serverReflectionServer struct { - rpb.UnimplementedServerReflectionServer - s *grpc.Server - - initSymbols sync.Once - serviceNames []string - symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files -} - -// Register registers the server reflection service on the given gRPC server. -func Register(s *grpc.Server) { - rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ - s: s, - }) -} - -// protoMessage is used for type assertion on proto messages. -// Generated proto message implements function Descriptor(), but Descriptor() -// is not part of interface proto.Message. This interface is needed to -// call Descriptor(). 
-type protoMessage interface { - Descriptor() ([]byte, []int) -} - -func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { - s.initSymbols.Do(func() { - serviceInfo := s.s.GetServiceInfo() - - s.symbols = map[string]*dpb.FileDescriptorProto{} - s.serviceNames = make([]string, 0, len(serviceInfo)) - processed := map[string]struct{}{} - for svc, info := range serviceInfo { - s.serviceNames = append(s.serviceNames, svc) - fdenc, ok := parseMetadata(info.Metadata) - if !ok { - continue - } - fd, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fd, processed) - } - sort.Strings(s.serviceNames) - }) - - return s.serviceNames, s.symbols -} - -func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { - filename := fd.GetName() - if _, ok := processed[filename]; ok { - return - } - processed[filename] = struct{}{} - - prefix := fd.GetPackage() - - for _, msg := range fd.MessageType { - s.processMessage(fd, prefix, msg) - } - for _, en := range fd.EnumType { - s.processEnum(fd, prefix, en) - } - for _, ext := range fd.Extension { - s.processField(fd, prefix, ext) - } - for _, svc := range fd.Service { - svcName := fqn(prefix, svc.GetName()) - s.symbols[svcName] = fd - for _, meth := range svc.Method { - name := fqn(svcName, meth.GetName()) - s.symbols[name] = fd - } - } - - for _, dep := range fd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - s.processFile(fdDep, processed) - } +// GRPCServer is the interface provided by a gRPC server. It is implemented by +// *grpc.Server, but could also be implemented by other concrete types. It acts +// as a registry, for accumulating the services exposed by the server. 
+type GRPCServer interface { + grpc.ServiceRegistrar + ServiceInfoProvider } -func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { - msgName := fqn(prefix, msg.GetName()) - s.symbols[msgName] = fd +var _ GRPCServer = (*grpc.Server)(nil) - for _, nested := range msg.NestedType { - s.processMessage(fd, msgName, nested) - } - for _, en := range msg.EnumType { - s.processEnum(fd, msgName, en) - } - for _, ext := range msg.Extension { - s.processField(fd, msgName, ext) - } - for _, fld := range msg.Field { - s.processField(fd, msgName, fld) - } - for _, oneof := range msg.OneofDecl { - oneofName := fqn(msgName, oneof.GetName()) - s.symbols[oneofName] = fd - } +// Register registers the server reflection service on the given gRPC server. +// Both the v1 and v1alpha versions are registered. +func Register(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) } -func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { - enName := fqn(prefix, en.GetName()) - s.symbols[enName] = fd - - for _, val := range en.Value { - valName := fqn(enName, val.GetName()) - s.symbols[valName] = fd - } +// RegisterV1 registers only the v1 version of the server reflection service +// on the given gRPC server. Many clients may only support v1alpha so most +// users should use Register instead, at least until clients have upgraded. 
+func RegisterV1(s GRPCServer) { + svr := NewServerV1(ServerOptions{Services: s}) + v1reflectiongrpc.RegisterServerReflectionServer(s, svr) } -func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { - fldName := fqn(prefix, fld.GetName()) - s.symbols[fldName] = fd +// ServiceInfoProvider is an interface used to retrieve metadata about the +// services to expose. +// +// The reflection service is only interested in the service names, but the +// signature is this way so that *grpc.Server implements it. So it is okay +// for a custom implementation to return zero values for the +// grpc.ServiceInfo values in the map. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServiceInfoProvider interface { + GetServiceInfo() map[string]grpc.ServiceInfo } -func fqn(prefix, name string) string { - if prefix == "" { - return name - } - return prefix + "." + name +// ExtensionResolver is the interface used to query details about extensions. +// This interface is satisfied by protoregistry.GlobalTypes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ExtensionResolver interface { + protoregistry.ExtensionTypeResolver + RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) } -// fileDescForType gets the file descriptor for the given type. -// The given type should be a proto message. -func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - enc, _ := m.Descriptor() - - return decodeFileDesc(enc) +// ServerOptions represents the options used to construct a reflection server. 
+// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerOptions struct { + // The source of advertised RPC services. If not specified, the reflection + // server will report an empty list when asked to list services. + // + // This value will typically be a *grpc.Server. But the set of advertised + // services can be customized by wrapping a *grpc.Server or using an + // alternate implementation that returns a custom set of service names. + Services ServiceInfoProvider + // Optional resolver used to load descriptors. If not specified, + // protoregistry.GlobalFiles will be used. + DescriptorResolver protodesc.Resolver + // Optional resolver used to query for known extensions. If not specified, + // protoregistry.GlobalTypes will be used. + ExtensionResolver ExtensionResolver } -// decodeFileDesc does decompression and unmarshalling on the given -// file descriptor byte slice. -func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { - raw, err := decompress(enc) - if err != nil { - return nil, fmt.Errorf("failed to decompress enc: %v", err) - } - - fd := new(dpb.FileDescriptorProto) - if err := proto.Unmarshal(raw, fd); err != nil { - return nil, fmt.Errorf("bad descriptor: %v", err) - } - return fd, nil +// NewServer returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. For backwards compatibility reasons, +// this returns the v1alpha version of the reflection server. For a v1 version +// of the reflection server, see NewServerV1. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { + return asV1Alpha(NewServerV1(opts)) } -// decompress does gzip decompression. 
-func decompress(b []byte) ([]byte, error) { - r, err := gzip.NewReader(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) - } - out, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("bad gzipped descriptor: %v", err) +// NewServerV1 returns a reflection server implementation using the given options. +// This can be used to customize behavior of the reflection service. Most usages +// should prefer to use Register instead. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { + if opts.DescriptorResolver == nil { + opts.DescriptorResolver = protoregistry.GlobalFiles + } + if opts.ExtensionResolver == nil { + opts.ExtensionResolver = protoregistry.GlobalTypes + } + return &serverReflectionServer{ + s: opts.Services, + descResolver: opts.DescriptorResolver, + extResolver: opts.ExtensionResolver, } - return out, nil } -func typeForName(name string) (reflect.Type, error) { - pt := proto.MessageType(name) - if pt == nil { - return nil, fmt.Errorf("unknown type: %q", name) - } - st := pt.Elem() - - return st, nil -} - -func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - var extDesc *proto.ExtensionDesc - for id, desc := range proto.RegisteredExtensions(m) { - if id == ext { - extDesc = desc - break - } - } - - if extDesc == nil { - return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) - } - - return decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) -} - -func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { - m, ok := 
reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) - if !ok { - return nil, fmt.Errorf("failed to create message from type: %v", st) - } - - exts := proto.RegisteredExtensions(m) - out := make([]int32, 0, len(exts)) - for id := range exts { - out = append(out, id) - } - return out, nil +type serverReflectionServer struct { + v1alphareflectiongrpc.UnimplementedServerReflectionServer + s ServiceInfoProvider + descResolver protodesc.Resolver + extResolver ExtensionResolver } // fileDescWithDependencies returns a slice of serialized fileDescriptors in // wire format ([]byte). The fileDescriptors will include fd and all the // transitive dependencies of fd with names not in sentFileDescriptors. -func fileDescWithDependencies(fd *dpb.FileDescriptorProto, sentFileDescriptors map[string]bool) ([][]byte, error) { - r := [][]byte{} - queue := []*dpb.FileDescriptorProto{fd} +func (s *serverReflectionServer) fileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { + var r [][]byte + queue := []protoreflect.FileDescriptor{fd} for len(queue) > 0 { currentfd := queue[0] queue = queue[1:] - if sent := sentFileDescriptors[currentfd.GetName()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.GetName()] = true - currentfdEncoded, err := proto.Marshal(currentfd) + if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { + sentFileDescriptors[currentfd.Path()] = true + fdProto := protodesc.ToFileDescriptorProto(currentfd) + currentfdEncoded, err := proto.Marshal(fdProto) if err != nil { return nil, err } r = append(r, currentfdEncoded) } - for _, dep := range currentfd.Dependency { - fdenc := proto.FileDescriptor(dep) - fdDep, err := decodeFileDesc(fdenc) - if err != nil { - continue - } - queue = append(queue, fdDep) + for i := 0; i < currentfd.Imports().Len(); i++ { + queue = append(queue, currentfd.Imports().Get(i)) } } return r, nil } -// fileDescEncodingByFilename finds the file descriptor for 
given filename, -// finds all of its previously unsent transitive dependencies, does marshalling -// on them, and returns the marshalled result. -func (s *serverReflectionServer) fileDescEncodingByFilename(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - enc := proto.FileDescriptor(name) - if enc == nil { - return nil, fmt.Errorf("unknown file: %v", name) - } - fd, err := decodeFileDesc(enc) - if err != nil { - return nil, err - } - return fileDescWithDependencies(fd, sentFileDescriptors) -} - -// parseMetadata finds the file descriptor bytes specified meta. -// For SupportPackageIsVersion4, m is the name of the proto file, we -// call proto.FileDescriptor to get the byte slice. -// For SupportPackageIsVersion3, m is a byte slice itself. -func parseMetadata(meta interface{}) ([]byte, bool) { - // Check if meta is the file name. - if fileNameForMeta, ok := meta.(string); ok { - return proto.FileDescriptor(fileNameForMeta), true - } - - // Check if meta is the byte slice. - if enc, ok := meta.([]byte); ok { - return enc, true - } - - return nil, false -} - // fileDescEncodingContainingSymbol finds the file descriptor containing the // given symbol, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. The given symbol // can be a type, a service or a method. func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - _, symbols := s.getSymbols() - fd := symbols[name] - if fd == nil { - // Check if it's a type name that was not present in the - // transitive dependencies of the registered services. 
- if st, err := typeForName(name); err == nil { - fd, err = s.fileDescForType(st) - if err != nil { - return nil, err - } - } - } - - if fd == nil { - return nil, fmt.Errorf("unknown symbol: %v", name) + d, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)) + if err != nil { + return nil, err } - - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(d.ParentFile(), sentFileDescriptors) } // fileDescEncodingContainingExtension finds the file descriptor containing // given extension, finds all of its previously unsent transitive dependencies, // does marshalling on them, and returns the marshalled result. func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - st, err := typeForName(typeName) - if err != nil { - return nil, err - } - fd, err := fileDescContainingExtension(st, extNum) + xt, err := s.extResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) if err != nil { return nil, err } - return fileDescWithDependencies(fd, sentFileDescriptors) + return s.fileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) } // allExtensionNumbersForTypeName returns all extension numbers for the given type. 
func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { - st, err := typeForName(name) - if err != nil { - return nil, err + var numbers []int32 + s.extResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { + numbers = append(numbers, int32(xt.TypeDescriptor().Number())) + return true + }) + sort.Slice(numbers, func(i, j int) bool { + return numbers[i] < numbers[j] + }) + if len(numbers) == 0 { + // maybe return an error if given type name is not known + if _, err := s.descResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { + return nil, err + } } - extNums, err := s.allExtensionNumbersForType(st) - if err != nil { - return nil, err + return numbers, nil +} + +// listServices returns the names of services this server exposes. +func (s *serverReflectionServer) listServices() []*v1reflectionpb.ServiceResponse { + serviceInfo := s.s.GetServiceInfo() + resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) + for svc := range serviceInfo { + resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) } - return extNums, nil + sort.Slice(resp, func(i, j int) bool { + return resp[i].Name < resp[j].Name + }) + return resp } // ServerReflectionInfo is the reflection service handler. 
-func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { +func (s *serverReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { in, err := stream.Recv() @@ -396,83 +264,80 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio return err } - out := &rpb.ServerReflectionResponse{ + out := &v1reflectionpb.ServerReflectionResponse{ ValidHost: in.Host, OriginalRequest: in, } switch req := in.MessageRequest.(type) { - case *rpb.ServerReflectionRequest_FileByFilename: - b, err := s.fileDescEncodingByFilename(req.FileByFilename, sentFileDescriptors) + case *v1reflectionpb.ServerReflectionRequest_FileByFilename: + var b [][]byte + fd, err := s.descResolver.FindFileByPath(req.FileByFilename) + if err == nil { + b, err = s.fileDescWithDependencies(fd, sentFileDescriptors) + } if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_FileContainingSymbol: + case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + 
out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_FileContainingExtension: + case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: typeName := req.FileContainingExtension.ContainingType extNum := req.FileContainingExtension.ExtensionNumber b, err := s.fileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: b}, + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, } } - case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) if err != nil { - out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &rpb.ErrorResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: 
&v1reflectionpb.ErrorResponse{ ErrorCode: int32(codes.NotFound), ErrorMessage: err.Error(), }, } } else { - out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ BaseTypeName: req.AllExtensionNumbersOfType, ExtensionNumber: extNums, }, } } - case *rpb.ServerReflectionRequest_ListServices: - svcNames, _ := s.getSymbols() - serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) - for i, n := range svcNames { - serviceResponses[i] = &rpb.ServiceResponse{ - Name: n, - } - } - out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &rpb.ListServiceResponse{ - Service: serviceResponses, + case *v1reflectionpb.ServerReflectionRequest_ListServices: + out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + Service: s.listServices(), }, } default: diff --git a/reflection/serverreflection_test.go b/reflection/serverreflection_test.go index 24070141c2f2..e61fd22d5a18 100644 --- a/reflection/serverreflection_test.go +++ b/reflection/serverreflection_test.go @@ -27,29 +27,39 @@ import ( "testing" "time" - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" - rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" + 
+ v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" + v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" pb "google.golang.org/grpc/reflection/grpc_testing" - pbv3 "google.golang.org/grpc/reflection/grpc_testingv3" + pbv3 "google.golang.org/grpc/reflection/grpc_testing_not_regenerate" ) var ( - s = &serverReflectionServer{} + s = NewServerV1(ServerOptions{}).(*serverReflectionServer) // fileDescriptor of each test proto file. - fdTest *dpb.FileDescriptorProto - fdTestv3 *dpb.FileDescriptorProto - fdProto2 *dpb.FileDescriptorProto - fdProto2Ext *dpb.FileDescriptorProto - fdProto2Ext2 *dpb.FileDescriptorProto + fdProto2Ext *descriptorpb.FileDescriptorProto + fdProto2Ext2 *descriptorpb.FileDescriptorProto + fdDynamic *descriptorpb.FileDescriptorProto + // reflection descriptors. + fdDynamicFile protoreflect.FileDescriptor // fileDescriptor marshalled. 
fdTestByte []byte fdTestv3Byte []byte fdProto2Byte []byte fdProto2ExtByte []byte fdProto2Ext2Byte []byte + fdDynamicByte []byte ) const defaultTestTimeout = 10 * time.Second @@ -62,85 +72,78 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, x{}) } -func loadFileDesc(filename string) (*dpb.FileDescriptorProto, []byte) { - enc := proto.FileDescriptor(filename) - if enc == nil { - panic(fmt.Sprintf("failed to find fd for file: %v", filename)) - } - fd, err := decodeFileDesc(enc) +func loadFileDesc(filename string) (*descriptorpb.FileDescriptorProto, []byte) { + fd, err := protoregistry.GlobalFiles.FindFileByPath(filename) if err != nil { - panic(fmt.Sprintf("failed to decode enc: %v", err)) + panic(err) } - b, err := proto.Marshal(fd) + fdProto := protodesc.ToFileDescriptorProto(fd) + b, err := proto.Marshal(fdProto) if err != nil { panic(fmt.Sprintf("failed to marshal fd: %v", err)) } - return fd, b + return fdProto, b } -func init() { - fdTest, fdTestByte = loadFileDesc("reflection/grpc_testing/test.proto") - fdTestv3, fdTestv3Byte = loadFileDesc("testv3.proto") - fdProto2, fdProto2Byte = loadFileDesc("reflection/grpc_testing/proto2.proto") - fdProto2Ext, fdProto2ExtByte = loadFileDesc("reflection/grpc_testing/proto2_ext.proto") - fdProto2Ext2, fdProto2Ext2Byte = loadFileDesc("reflection/grpc_testing/proto2_ext2.proto") -} +func loadFileDescDynamic(b []byte) (*descriptorpb.FileDescriptorProto, protoreflect.FileDescriptor, []byte) { + m := new(descriptorpb.FileDescriptorProto) + if err := proto.Unmarshal(b, m); err != nil { + panic("failed to unmarshal dynamic proto raw descriptor") + } -func (x) TestFileDescForType(t *testing.T) { - for _, test := range []struct { - st reflect.Type - wantFd *dpb.FileDescriptorProto - }{ - {reflect.TypeOf(pb.SearchResponse_Result{}), fdTest}, - {reflect.TypeOf(pb.ToBeExtended{}), fdProto2}, - } { - fd, err := s.fileDescForType(test.st) - if err != nil || !proto.Equal(fd, test.wantFd) { - t.Errorf("fileDescForType(%q) = %q, %v, 
want %q, ", test.st, fd, err, test.wantFd) - } + fd, err := protodesc.NewFile(m, nil) + if err != nil { + panic(err) } -} -func (x) TestTypeForName(t *testing.T) { - for _, test := range []struct { - name string - want reflect.Type - }{ - {"grpc.testing.SearchResponse", reflect.TypeOf(pb.SearchResponse{})}, - } { - r, err := typeForName(test.name) - if err != nil || r != test.want { - t.Errorf("typeForName(%q) = %q, %v, want %q, ", test.name, r, err, test.want) - } + err = protoregistry.GlobalFiles.RegisterFile(fd) + if err != nil { + panic(err) } -} -func (x) TestTypeForNameNotFound(t *testing.T) { - for _, test := range []string{ - "grpc.testing.not_exiting", - } { - _, err := typeForName(test) - if err == nil { - t.Errorf("typeForName(%q) = _, %v, want _, ", test, err) + for i := 0; i < fd.Messages().Len(); i++ { + m := fd.Messages().Get(i) + if err := protoregistry.GlobalTypes.RegisterMessage(dynamicpb.NewMessageType(m)); err != nil { + panic(err) } } + + return m, fd, b +} + +func init() { + _, fdTestByte = loadFileDesc("reflection/grpc_testing/test.proto") + _, fdTestv3Byte = loadFileDesc("testv3.proto") + _, fdProto2Byte = loadFileDesc("reflection/grpc_testing/proto2.proto") + fdProto2Ext, fdProto2ExtByte = loadFileDesc("reflection/grpc_testing/proto2_ext.proto") + fdProto2Ext2, fdProto2Ext2Byte = loadFileDesc("reflection/grpc_testing/proto2_ext2.proto") + fdDynamic, fdDynamicFile, fdDynamicByte = loadFileDescDynamic(pbv3.FileDynamicProtoRawDesc) } func (x) TestFileDescContainingExtension(t *testing.T) { for _, test := range []struct { - st reflect.Type + st string extNum int32 - want *dpb.FileDescriptorProto + want *descriptorpb.FileDescriptorProto }{ - {reflect.TypeOf(pb.ToBeExtended{}), 13, fdProto2Ext}, - {reflect.TypeOf(pb.ToBeExtended{}), 17, fdProto2Ext}, - {reflect.TypeOf(pb.ToBeExtended{}), 19, fdProto2Ext}, - {reflect.TypeOf(pb.ToBeExtended{}), 23, fdProto2Ext2}, - {reflect.TypeOf(pb.ToBeExtended{}), 29, fdProto2Ext2}, + 
{"grpc.testing.ToBeExtended", 13, fdProto2Ext}, + {"grpc.testing.ToBeExtended", 17, fdProto2Ext}, + {"grpc.testing.ToBeExtended", 19, fdProto2Ext}, + {"grpc.testing.ToBeExtended", 23, fdProto2Ext2}, + {"grpc.testing.ToBeExtended", 29, fdProto2Ext2}, } { - fd, err := fileDescContainingExtension(test.st, test.extNum) - if err != nil || !proto.Equal(fd, test.want) { - t.Errorf("fileDescContainingExtension(%q) = %q, %v, want %q, ", test.st, fd, err, test.want) + fd, err := s.fileDescEncodingContainingExtension(test.st, test.extNum, map[string]bool{}) + if err != nil { + t.Errorf("fileDescContainingExtension(%q) return error: %v", test.st, err) + continue + } + var actualFd descriptorpb.FileDescriptorProto + if err := proto.Unmarshal(fd[0], &actualFd); err != nil { + t.Errorf("fileDescContainingExtension(%q) return invalid bytes: %v", test.st, err) + continue + } + if !proto.Equal(&actualFd, test.want) { + t.Errorf("fileDescContainingExtension(%q) returned %q, but wanted %q", test.st, &actualFd, test.want) } } } @@ -152,14 +155,14 @@ func (s intArray) Len() int { return len(s) } func (s intArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s intArray) Less(i, j int) bool { return s[i] < s[j] } -func (x) TestAllExtensionNumbersForType(t *testing.T) { +func (x) TestAllExtensionNumbersForTypeName(t *testing.T) { for _, test := range []struct { - st reflect.Type + st string want []int32 }{ - {reflect.TypeOf(pb.ToBeExtended{}), []int32{13, 17, 19, 23, 29}}, + {"grpc.testing.ToBeExtended", []int32{13, 17, 19, 23, 29}}, } { - r, err := s.allExtensionNumbersForType(test.st) + r, err := s.allExtensionNumbersForTypeName(test.st) sort.Sort(intArray(r)) if err != nil || !reflect.DeepEqual(r, test.want) { t.Errorf("allExtensionNumbersForType(%q) = %v, %v, want %v, ", test.st, r, err, test.want) @@ -200,44 +203,65 @@ func (x) TestReflectionEnd2end(t *testing.T) { s := grpc.NewServer() pb.RegisterSearchServiceServer(s, &server{}) pbv3.RegisterSearchServiceV3Server(s, &serverV3{}) 
+ + registerDynamicProto(s, fdDynamic, fdDynamicFile) + // Register reflection service on s. Register(s) go s.Serve(lis) + t.Cleanup(s.Stop) // Create client. - conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("cannot connect to server: %v", err) } defer conn.Close() - c := rpb.NewServerReflectionClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - stream, err := c.ServerReflectionInfo(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("cannot get ServerReflectionInfo: %v", err) + clientV1 := v1reflectiongrpc.NewServerReflectionClient(conn) + clientV1Alpha := v1alphareflectiongrpc.NewServerReflectionClient(conn) + testCases := []struct { + name string + client v1reflectiongrpc.ServerReflectionClient + }{ + { + name: "v1", + client: clientV1, + }, + { + name: "v1alpha", + client: v1AlphaClientAdapter{stub: clientV1Alpha}, + }, } + for _, testCase := range testCases { + c := testCase.client + t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := c.ServerReflectionInfo(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("cannot get ServerReflectionInfo: %v", err) + } - testFileByFilenameTransitiveClosure(t, stream, true) - testFileByFilenameTransitiveClosure(t, stream, false) - testFileByFilename(t, stream) - testFileByFilenameError(t, stream) - testFileContainingSymbol(t, stream) - testFileContainingSymbolError(t, stream) - testFileContainingExtension(t, stream) - testFileContainingExtensionError(t, stream) - testAllExtensionNumbersOfType(t, stream) - testAllExtensionNumbersOfTypeError(t, stream) - testListServices(t, stream) - - s.Stop() + testFileByFilenameTransitiveClosure(t, stream, true) + testFileByFilenameTransitiveClosure(t, stream, false) + 
testFileByFilename(t, stream) + testFileByFilenameError(t, stream) + testFileContainingSymbol(t, stream) + testFileContainingSymbolError(t, stream) + testFileContainingExtension(t, stream) + testFileContainingExtensionError(t, stream) + testAllExtensionNumbersOfType(t, stream) + testAllExtensionNumbersOfTypeError(t, stream) + testListServices(t, stream) + }) + } } -func testFileByFilenameTransitiveClosure(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient, expectClosure bool) { +func testFileByFilenameTransitiveClosure(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient, expectClosure bool) { filename := "reflection/grpc_testing/proto2_ext2.proto" - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileByFilename{ FileByFilename: filename, }, }); err != nil { @@ -249,7 +273,7 @@ func testFileByFilenameTransitiveClosure(t *testing.T, stream rpb.ServerReflecti t.Fatalf("failed to recv response: %v", err) } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_FileDescriptorResponse: + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], fdProto2Ext2Byte) { t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], fdProto2Ext2Byte) } @@ -267,7 +291,7 @@ func testFileByFilenameTransitiveClosure(t *testing.T, stream rpb.ServerReflecti } } -func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileByFilename(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { filename string want []byte @@ -275,9 +299,10 @@ func testFileByFilename(t *testing.T, stream 
rpb.ServerReflection_ServerReflecti {"reflection/grpc_testing/test.proto", fdTestByte}, {"reflection/grpc_testing/proto2.proto", fdProto2Byte}, {"reflection/grpc_testing/proto2_ext.proto", fdProto2ExtByte}, + {"dynamic.proto", fdDynamicByte}, } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileByFilename{ FileByFilename: test.filename, }, }); err != nil { @@ -290,7 +315,7 @@ func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflecti } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_FileDescriptorResponse: + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileByFilename(%v)\nreceived: %q,\nwant: %q", test.filename, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -300,14 +325,14 @@ func testFileByFilename(t *testing.T, stream rpb.ServerReflection_ServerReflecti } } -func testFileByFilenameError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileByFilenameError(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "test.poto", "proo2.proto", "proto2_et.proto", } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileByFilename{ FileByFilename: test, }, }); err != nil { @@ -320,14 +345,14 @@ func testFileByFilenameError(t *testing.T, stream rpb.ServerReflection_ServerRef } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_ErrorResponse: + case 
*v1reflectionpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileByFilename(%v) = %v, want type ", test, r.MessageResponse) } } } -func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingSymbol(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { symbol string want []byte @@ -347,10 +372,14 @@ func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerRe {"grpc.testingv3.SearchResponseV3.Result.Value.val", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3.Result.Value.str", fdTestv3Byte}, {"grpc.testingv3.SearchResponseV3.State", fdTestv3Byte}, - {"grpc.testingv3.SearchResponseV3.State.FRESH", fdTestv3Byte}, + {"grpc.testingv3.SearchResponseV3.FRESH", fdTestv3Byte}, + // Test dynamic symbols + {"grpc.testing.DynamicService", fdDynamicByte}, + {"grpc.testing.DynamicReq", fdDynamicByte}, + {"grpc.testing.DynamicRes", fdDynamicByte}, } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: test.symbol, }, }); err != nil { @@ -363,7 +392,7 @@ func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerRe } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_FileDescriptorResponse: + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileContainingSymbol(%v)\nreceived: %q,\nwant: %q", test.symbol, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -373,15 +402,15 @@ func testFileContainingSymbol(t *testing.T, stream rpb.ServerReflection_ServerRe } } -func testFileContainingSymbolError(t *testing.T, 
stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingSymbolError(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "grpc.testing.SerchService", "grpc.testing.SearchService.SearchE", "grpc.tesing.SearchResponse", "gpc.testing.ToBeExtended", } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ FileContainingSymbol: test, }, }); err != nil { @@ -394,14 +423,14 @@ func testFileContainingSymbolError(t *testing.T, stream rpb.ServerReflection_Ser } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_ErrorResponse: + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileContainingSymbol(%v) = %v, want type ", test, r.MessageResponse) } } } -func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingExtension(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string extNum int32 @@ -413,9 +442,9 @@ func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_Serve {"grpc.testing.ToBeExtended", 23, fdProto2Ext2Byte}, {"grpc.testing.ToBeExtended", 29, fdProto2Ext2Byte}, } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &rpb.ExtensionRequest{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ ContainingType: test.typeName, ExtensionNumber: test.extNum, }, @@ -430,7 +459,7 @@ func 
testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_Serve } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_FileDescriptorResponse: + case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: if !reflect.DeepEqual(r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) { t.Errorf("FileContainingExtension(%v, %v)\nreceived: %q,\nwant: %q", test.typeName, test.extNum, r.GetFileDescriptorResponse().FileDescriptorProto[0], test.want) } @@ -440,7 +469,7 @@ func testFileContainingExtension(t *testing.T, stream rpb.ServerReflection_Serve } } -func testFileContainingExtensionError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testFileContainingExtensionError(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string extNum int32 @@ -448,9 +477,9 @@ func testFileContainingExtensionError(t *testing.T, stream rpb.ServerReflection_ {"grpc.testing.ToBExtended", 17}, {"grpc.testing.ToBeExtended", 15}, } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &rpb.ExtensionRequest{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ + FileContainingExtension: &v1reflectionpb.ExtensionRequest{ ContainingType: test.typeName, ExtensionNumber: test.extNum, }, @@ -465,22 +494,23 @@ func testFileContainingExtensionError(t *testing.T, stream rpb.ServerReflection_ } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_ErrorResponse: + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("FileContainingExtension(%v, %v) = %v, want type ", test.typeName, test.extNum, r.MessageResponse) } } } -func testAllExtensionNumbersOfType(t *testing.T, stream 
rpb.ServerReflection_ServerReflectionInfoClient) { +func testAllExtensionNumbersOfType(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []struct { typeName string want []int32 }{ {"grpc.testing.ToBeExtended", []int32{13, 17, 19, 23, 29}}, + {"grpc.testing.DynamicReq", nil}, } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: test.typeName, }, }); err != nil { @@ -493,7 +523,7 @@ func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_Ser } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_AllExtensionNumbersResponse: + case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: extNum := r.GetAllExtensionNumbersResponse().ExtensionNumber sort.Sort(intArray(extNum)) if r.GetAllExtensionNumbersResponse().BaseTypeName != test.typeName || @@ -506,12 +536,12 @@ func testAllExtensionNumbersOfType(t *testing.T, stream rpb.ServerReflection_Ser } } -func testAllExtensionNumbersOfTypeError(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { +func testAllExtensionNumbersOfTypeError(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { for _, test := range []string{ "grpc.testing.ToBeExtendedE", } { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{ + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ AllExtensionNumbersOfType: test, }, }); err != nil { @@ -524,16 +554,16 @@ func testAllExtensionNumbersOfTypeError(t *testing.T, stream rpb.ServerReflectio } switch r.MessageResponse.(type) { - 
case *rpb.ServerReflectionResponse_ErrorResponse: + case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: default: t.Errorf("AllExtensionNumbersOfType(%v) = %v, want type ", test, r.MessageResponse) } } } -func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflectionInfoClient) { - if err := stream.Send(&rpb.ServerReflectionRequest{ - MessageRequest: &rpb.ServerReflectionRequest_ListServices{}, +func testListServices(t *testing.T, stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient) { + if err := stream.Send(&v1reflectionpb.ServerReflectionRequest{ + MessageRequest: &v1reflectionpb.ServerReflectionRequest_ListServices{}, }); err != nil { t.Fatalf("failed to send request: %v", err) } @@ -544,12 +574,14 @@ func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflection } switch r.MessageResponse.(type) { - case *rpb.ServerReflectionResponse_ListServicesResponse: + case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: services := r.GetListServicesResponse().Service want := []string{ "grpc.testingv3.SearchServiceV3", "grpc.testing.SearchService", + "grpc.reflection.v1.ServerReflection", "grpc.reflection.v1alpha.ServerReflection", + "grpc.testing.DynamicService", } // Compare service names in response with want. 
if len(services) != len(want) { @@ -570,3 +602,107 @@ func testListServices(t *testing.T, stream rpb.ServerReflection_ServerReflection t.Errorf("ListServices = %v, want type ", r.MessageResponse) } } + +func registerDynamicProto(srv *grpc.Server, fdp *descriptorpb.FileDescriptorProto, fd protoreflect.FileDescriptor) { + type emptyInterface interface{} + + for i := 0; i < fd.Services().Len(); i++ { + s := fd.Services().Get(i) + + sd := &grpc.ServiceDesc{ + ServiceName: string(s.FullName()), + HandlerType: (*emptyInterface)(nil), + Metadata: fdp.GetName(), + } + + for j := 0; j < s.Methods().Len(); j++ { + m := s.Methods().Get(j) + sd.Methods = append(sd.Methods, grpc.MethodDesc{ + MethodName: string(m.Name()), + }) + } + + srv.RegisterService(sd, struct{}{}) + } +} + +type v1AlphaClientAdapter struct { + stub v1alphareflectiongrpc.ServerReflectionClient +} + +func (v v1AlphaClientAdapter) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (v1reflectiongrpc.ServerReflection_ServerReflectionInfoClient, error) { + stream, err := v.stub.ServerReflectionInfo(ctx, opts...) 
+ if err != nil { + return nil, err + } + return v1AlphaClientStreamAdapter{stream}, nil +} + +type v1AlphaClientStreamAdapter struct { + v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoClient +} + +func (s v1AlphaClientStreamAdapter) Send(request *v1reflectionpb.ServerReflectionRequest) error { + return s.ServerReflection_ServerReflectionInfoClient.Send(v1ToV1AlphaRequest(request)) +} + +func (s v1AlphaClientStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionResponse, error) { + resp, err := s.ServerReflection_ServerReflectionInfoClient.Recv() + if err != nil { + return nil, err + } + return v1AlphaToV1Response(resp), nil +} + +func v1AlphaToV1Response(v1alpha *v1alphareflectionpb.ServerReflectionResponse) *v1reflectionpb.ServerReflectionResponse { + var v1 v1reflectionpb.ServerReflectionResponse + v1.ValidHost = v1alpha.ValidHost + if v1alpha.OriginalRequest != nil { + v1.OriginalRequest = v1AlphaToV1Request(v1alpha.OriginalRequest) + } + switch mr := v1alpha.MessageResponse.(type) { + case *v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{ + FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ + BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), + ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse: + if mr != nil { + svcs := make([]*v1reflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) + for i, svc := range 
mr.ListServicesResponse.GetService() { + svcs[i] = &v1reflectionpb.ServiceResponse{ + Name: svc.GetName(), + } + } + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &v1reflectionpb.ListServiceResponse{ + Service: svcs, + }, + } + } + case *v1alphareflectionpb.ServerReflectionResponse_ErrorResponse: + if mr != nil { + v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &v1reflectionpb.ErrorResponse{ + ErrorCode: mr.ErrorResponse.GetErrorCode(), + ErrorMessage: mr.ErrorResponse.GetErrorMessage(), + }, + } + } + default: + // no value set + } + return &v1 +} diff --git a/regenerate.sh b/regenerate.sh index fc6725b89f84..a6f26c8ab0f0 100755 --- a/regenerate.sh +++ b/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -48,11 +48,6 @@ mkdir -p ${WORKDIR}/googleapis/google/rpc echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto -# Pull in the MeshCA service proto. 
-mkdir -p ${WORKDIR}/istio/istio/google/security/meshca/v1 -echo "curl https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto" -curl --silent https://raw.githubusercontent.com/istio/istio/master/security/proto/providers/google/meshca.proto > ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto - mkdir -p ${WORKDIR}/out # Generates sources without the embed requirement @@ -62,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -73,16 +69,27 @@ SOURCES=( ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto ${WORKDIR}/grpc-proto/grpc/testing/*.proto ${WORKDIR}/grpc-proto/grpc/core/*.proto - ${WORKDIR}/istio/istio/google/security/meshca/v1/meshca.proto ) # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an # import path of 'bar' in the generated code when 'foo.proto' is imported in # one of the sources. -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). 
+OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" @@ -91,7 +98,6 @@ for src in ${SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done @@ -102,28 +108,16 @@ for src in ${LEGACY_SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done # The go_package option in grpc/lookup/v1/rls.proto doesn't match the # current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 - -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go - -# grpc/service_config/service_config.proto does not have a go_package option. -mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config - -# grpc/testing does not have a go_package option. 
-mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# istio/google/security/meshca/v1/meshca.proto does not have a go_package option. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ -mv ${WORKDIR}/out/istio/google/security/meshca/v1/* ${WORKDIR}/out/google.golang.org/grpc/credentials/tls/certprovider/meshca/internal/v1/ +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/resolver/manual/manual.go b/resolver/manual/manual.go index 3679d702ab96..f27978e1281f 100644 --- a/resolver/manual/manual.go +++ b/resolver/manual/manual.go @@ -21,13 +21,17 @@ package manual import ( + "sync" + "google.golang.org/grpc/resolver" ) // NewBuilderWithScheme creates a new test resolver builder with the given scheme. func NewBuilderWithScheme(scheme string) *Resolver { return &Resolver{ + BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, ResolveNowCallback: func(resolver.ResolveNowOptions) {}, + CloseCallback: func() {}, scheme: scheme, } } @@ -35,13 +39,20 @@ func NewBuilderWithScheme(scheme string) *Resolver { // Resolver is also a resolver builder. // It's build() function always returns itself. type Resolver struct { + // BuildCallback is called when the Build method is called. Must not be + // nil. Must not be changed after the resolver may be built. 
+ BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) // ResolveNowCallback is called when the ResolveNow method is called on the // resolver. Must not be nil. Must not be changed after the resolver may // be built. ResolveNowCallback func(resolver.ResolveNowOptions) - scheme string + // CloseCallback is called when the Close method is called. Must not be + // nil. Must not be changed after the resolver may be built. + CloseCallback func() + scheme string // Fields actually belong to the resolver. + mu sync.Mutex // Guards access to CC. CC resolver.ClientConn bootstrapState *resolver.State } @@ -54,7 +65,10 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.mu.Lock() r.CC = cc + r.mu.Unlock() + r.BuildCallback(target, cc, opts) if r.bootstrapState != nil { r.UpdateState(*r.bootstrapState) } @@ -72,9 +86,20 @@ func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { } // Close is a noop for Resolver. -func (*Resolver) Close() {} +func (r *Resolver) Close() { + r.CloseCallback() +} // UpdateState calls CC.UpdateState. func (r *Resolver) UpdateState(s resolver.State) { + r.mu.Lock() r.CC.UpdateState(s) + r.mu.Unlock() +} + +// ReportError calls CC.ReportError. +func (r *Resolver) ReportError(err error) { + r.mu.Lock() + r.CC.ReportError(err) + r.mu.Unlock() } diff --git a/resolver/map.go b/resolver/map.go new file mode 100644 index 000000000000..efcb7f3efd82 --- /dev/null +++ b/resolver/map.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value interface{} +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. 
+func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[Address]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + for i, entry := range l { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value interface{}) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + entryList[entry].value = value + return + } + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addrKey] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. 
+func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []interface{} { + ret := make([]interface{}, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/resolver/map_test.go b/resolver/map_test.go new file mode 100644 index 000000000000..0b0ac1667902 --- /dev/null +++ b/resolver/map_test.go @@ -0,0 +1,163 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +import ( + "fmt" + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/attributes" +) + +// Note: each address is different from addr1 by one value. addr7 matches +// addr1, since the only difference is BalancerAttributes, which are not +// compared. 
+var ( + addr1 = Address{Addr: "a1", Attributes: attributes.New("a1", 3), ServerName: "s1"} + addr2 = Address{Addr: "a2", Attributes: attributes.New("a1", 3), ServerName: "s1"} + addr3 = Address{Addr: "a1", Attributes: attributes.New("a2", 3), ServerName: "s1"} + addr4 = Address{Addr: "a1", Attributes: attributes.New("a1", 2), ServerName: "s1"} + addr5 = Address{Addr: "a1", Attributes: attributes.New("a1", "3"), ServerName: "s1"} + addr6 = Address{Addr: "a1", Attributes: attributes.New("a1", 3), ServerName: "s2"} + addr7 = Address{Addr: "a1", Attributes: attributes.New("a1", 3), ServerName: "s1", BalancerAttributes: attributes.New("xx", 3)} +) + +func (s) TestAddressMap_Length(t *testing.T) { + addrMap := NewAddressMap() + if got := addrMap.Len(); got != 0 { + t.Fatalf("addrMap.Len() = %v; want 0", got) + } + for i := 0; i < 10; i++ { + addrMap.Set(addr1, nil) + if got, want := addrMap.Len(), 1; got != want { + t.Fatalf("addrMap.Len() = %v; want %v", got, want) + } + addrMap.Set(addr7, nil) // aliases addr1 + } + for i := 0; i < 10; i++ { + addrMap.Set(addr2, nil) + if got, want := addrMap.Len(), 2; got != want { + t.Fatalf("addrMap.Len() = %v; want %v", got, want) + } + } +} + +func (s) TestAddressMap_Get(t *testing.T) { + addrMap := NewAddressMap() + addrMap.Set(addr1, 1) + + if got, ok := addrMap.Get(addr2); ok || got != nil { + t.Fatalf("addrMap.Get(addr1) = %v, %v; want nil, false", got, ok) + } + + addrMap.Set(addr2, 2) + addrMap.Set(addr3, 3) + addrMap.Set(addr4, 4) + addrMap.Set(addr5, 5) + addrMap.Set(addr6, 6) + addrMap.Set(addr7, 7) // aliases addr1 + if got, ok := addrMap.Get(addr1); !ok || got.(int) != 7 { + t.Fatalf("addrMap.Get(addr1) = %v, %v; want %v, true", got, ok, 7) + } + if got, ok := addrMap.Get(addr2); !ok || got.(int) != 2 { + t.Fatalf("addrMap.Get(addr2) = %v, %v; want %v, true", got, ok, 2) + } + if got, ok := addrMap.Get(addr3); !ok || got.(int) != 3 { + t.Fatalf("addrMap.Get(addr3) = %v, %v; want %v, true", got, ok, 3) + } + if got, ok 
:= addrMap.Get(addr4); !ok || got.(int) != 4 { + t.Fatalf("addrMap.Get(addr4) = %v, %v; want %v, true", got, ok, 4) + } + if got, ok := addrMap.Get(addr5); !ok || got.(int) != 5 { + t.Fatalf("addrMap.Get(addr5) = %v, %v; want %v, true", got, ok, 5) + } + if got, ok := addrMap.Get(addr6); !ok || got.(int) != 6 { + t.Fatalf("addrMap.Get(addr6) = %v, %v; want %v, true", got, ok, 6) + } + if got, ok := addrMap.Get(addr7); !ok || got.(int) != 7 { + t.Fatalf("addrMap.Get(addr7) = %v, %v; want %v, true", got, ok, 7) + } +} + +func (s) TestAddressMap_Delete(t *testing.T) { + addrMap := NewAddressMap() + addrMap.Set(addr1, 1) + addrMap.Set(addr2, 2) + if got, want := addrMap.Len(), 2; got != want { + t.Fatalf("addrMap.Len() = %v; want %v", got, want) + } + addrMap.Delete(addr3) + addrMap.Delete(addr4) + addrMap.Delete(addr5) + addrMap.Delete(addr6) + addrMap.Delete(addr7) // aliases addr1 + if got, ok := addrMap.Get(addr1); ok || got != nil { + t.Fatalf("addrMap.Get(addr1) = %v, %v; want nil, false", got, ok) + } + if got, ok := addrMap.Get(addr7); ok || got != nil { + t.Fatalf("addrMap.Get(addr7) = %v, %v; want nil, false", got, ok) + } + if got, ok := addrMap.Get(addr2); !ok || got.(int) != 2 { + t.Fatalf("addrMap.Get(addr2) = %v, %v; want %v, true", got, ok, 2) + } +} + +func (s) TestAddressMap_Keys(t *testing.T) { + addrMap := NewAddressMap() + addrMap.Set(addr1, 1) + addrMap.Set(addr2, 2) + addrMap.Set(addr3, 3) + addrMap.Set(addr4, 4) + addrMap.Set(addr5, 5) + addrMap.Set(addr6, 6) + addrMap.Set(addr7, 7) // aliases addr1 + + want := []Address{addr1, addr2, addr3, addr4, addr5, addr6} + got := addrMap.Keys() + if d := cmp.Diff(want, got, cmp.Transformer("sort", func(in []Address) []Address { + out := append([]Address(nil), in...) 
+ sort.Slice(out, func(i, j int) bool { return fmt.Sprint(out[i]) < fmt.Sprint(out[j]) }) + return out + })); d != "" { + t.Fatalf("addrMap.Keys returned unexpected elements (-want, +got):\n%v", d) + } +} + +func (s) TestAddressMap_Values(t *testing.T) { + addrMap := NewAddressMap() + addrMap.Set(addr1, 1) + addrMap.Set(addr2, 2) + addrMap.Set(addr3, 3) + addrMap.Set(addr4, 4) + addrMap.Set(addr5, 5) + addrMap.Set(addr6, 6) + addrMap.Set(addr7, 7) // aliases addr1 + + want := []int{2, 3, 4, 5, 6, 7} + var got []int + for _, v := range addrMap.Values() { + got = append(got, v.(int)) + } + sort.Ints(got) + if diff := cmp.Diff(want, got); diff != "" { + t.Fatalf("addrMap.Values returned unexpected elements (-want, +got):\n%v", diff) + } +} diff --git a/resolver/resolver.go b/resolver/resolver.go index e9fa8e33d923..d8db6f5d34eb 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -22,7 +22,10 @@ package resolver import ( "context" + "fmt" "net" + "net/url" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" @@ -38,8 +41,9 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will be -// used as the scheme registered with this builder. +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -94,7 +98,7 @@ const ( // Address represents a server the client connects to. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -116,9 +120,14 @@ type Address struct { ServerName string // Attributes contains arbitrary data about this address intended for - // consumption by the load balancing policy. + // consumption by the SubConn. Attributes *attributes.Attributes + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attributes do not affect SubConn + // creation, connection establishment, handshaking, etc. + BalancerAttributes *attributes.Attributes + // Type is the type of this address. // // Deprecated: use Attributes instead. @@ -131,6 +140,34 @@ type Address struct { Metadata interface{} } +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. +func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Type == o.Type && a.Metadata == o.Metadata +} + +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() +} + // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { @@ -181,7 +218,16 @@ type State struct { // gRPC to add new methods to this interface. 
type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. - UpdateState(State) + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. + UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling // ResolveNow on the Resolver with exponential backoff. @@ -204,25 +250,47 @@ type ClientConn interface { // Target represents a target for gRPC, as specified in: // https://github.com/grpc/grpc/blob/master/doc/naming.md. -// It is parsed from the target string that gets passed into Dial or DialContext by the user. And -// grpc passes it to the resolver and the balancer. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. // -// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will -// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed -// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. 
// -// If the target does not contain a scheme, we will apply the default scheme, and set the Target to -// be the full target string. e.g. "foo.bar" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// Examples: // -// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the -// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target -// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into -// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - Scheme string - Authority string - Endpoint string + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL +} + +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. 
But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") } // Builder creates a resolver that will be used to watch name resolution updates. @@ -232,8 +300,10 @@ type Builder interface { // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) - // Scheme returns the scheme supported by this resolver. - // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. Scheme() string } diff --git a/internal/credentials/syscallconn_appengine.go b/resolver/resolver_test.go similarity index 74% rename from internal/credentials/syscallconn_appengine.go rename to resolver/resolver_test.go index a6144cd661c2..8d061f9b66d2 100644 --- a/internal/credentials/syscallconn_appengine.go +++ b/resolver/resolver_test.go @@ -1,8 +1,6 @@ -// +build appengine - /* * - * Copyright 2018 gRPC authors. + * Copyright 2021 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,13 +16,18 @@ * */ -package credentials +package resolver import ( - "net" + "testing" + + "google.golang.org/grpc/internal/grpctest" ) -// WrapSyscallConn returns newConn on appengine. 
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { - return newConn +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) } diff --git a/resolver_conn_wrapper.go b/resolver_conn_wrapper.go index f2d81968f9ec..b408b3688f2e 100644 --- a/resolver_conn_wrapper.go +++ b/resolver_conn_wrapper.go @@ -19,184 +19,204 @@ package grpc import ( - "fmt" + "context" "strings" "sync" - "time" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. +type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn - resolverMu sync.Mutex - resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State - - pollingMu sync.Mutex - polling chan struct{} + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + channelzID *channelz.Identifier + ignoreServiceConfig bool + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. + serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. 
+ curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. +} + +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. + channelzID *channelz.Identifier // Channelz identifier for the channel. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - } - - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. 
- ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + cc: cc, + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. + r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { + cancel() return nil, err } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. + ccr.mu.Lock() + ccr.resolver = r + ccr.mu.Unlock() + return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { - ccr.resolver.ResolveNow(o) + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. + if ccr.closed || ccr.resolver == nil { + return } - ccr.resolverMu.Unlock() + ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() - ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() -} - -// poll begins or ends asynchronous polling of the resolver based on whether -// err is ErrBadResolverState. 
-func (ccr *ccResolverWrapper) poll(err error) { - ccr.pollingMu.Lock() - defer ccr.pollingMu.Unlock() - if err != balancer.ErrBadResolverState { - // stop polling - if ccr.polling != nil { - close(ccr.polling) - ccr.polling = nil - } - return - } - if ccr.polling != nil { - // already polling + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() return } - p := make(chan struct{}) - ccr.polling = p - go func() { - for i := 0; ; i++ { - ccr.resolveNow(resolver.ResolveNowOptions{}) - t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) - select { - case <-p: - t.Stop() - return - case <-ccr.done.Done(): - // Resolver has been closed. - t.Stop() - return - case <-t.C: - select { - case <-p: - return - default: - } - // Timer expired; re-resolve. - } - } - }() + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + + // Close the serializer to ensure that no more calls from the resolver are + // handled, before actually closing the resolver. + ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-ccr.serializer.Done + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. + go r.Close() } -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { +// serializerScheduleLocked is a convenience method to schedule a function to be +// run on the serializer while holding ccr.mu. +func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { + ccr.mu.Lock() + ccr.serializer.Schedule(f) + ccr.mu.Unlock() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. 
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + errCh := make(chan error, 1) + ok := ccr.serializer.Schedule(func(context.Context) { ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + errCh <- balancer.ErrBadResolverState + return + } + errCh <- nil + }) + if !ok { + // The only time when Schedule() fail to add the callback to the + // serializer is when the serializer is closed, and this happens only + // when the resolver wrapper is closed. + return nil } - ccr.curState = s - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + return <-errCh } +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. func (ccr *ccResolverWrapper) ReportError(err error) { - if ccr.done.HasFired() { - return - } - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) + }) } -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. 
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { + ccr.serializerScheduleLocked(func(_ context.Context) { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } - ccr.curState.Addresses = addrs - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) + }) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - ccr.poll(balancer.ErrBadResolverState) - return - } - if channelz.IsOn() { + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } - ccr.curState.ServiceConfig = scpr - ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) + ccr.curState.ServiceConfig = scpr + 
ccr.cc.updateResolverState(ccr.curState, nil) + }) } +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig @@ -215,8 +235,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/resolver_conn_wrapper_test.go b/resolver_conn_wrapper_test.go deleted file mode 100644 index f13a408937b1..000000000000 --- a/resolver_conn_wrapper_test.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "context" - "errors" - "fmt" - "net" - "strings" - "testing" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/balancer/stub" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/status" -) - -// The target string with unknown scheme should be kept unchanged and passed to -// the dialer. -func (s) TestDialParseTargetUnknownScheme(t *testing.T) { - for _, test := range []struct { - targetStr string - want string - }{ - {"/unix/socket/address", "/unix/socket/address"}, - - // For known scheme. - {"passthrough://a.server.com/google.com", "google.com"}, - } { - dialStrCh := make(chan string, 1) - cc, err := Dial(test.targetStr, WithInsecure(), WithDialer(func(addr string, _ time.Duration) (net.Conn, error) { - select { - case dialStrCh <- addr: - default: - } - return nil, fmt.Errorf("test dialer, always error") - })) - if err != nil { - t.Fatalf("Failed to create ClientConn: %v", err) - } - got := <-dialStrCh - cc.Close() - if got != test.want { - t.Errorf("Dial(%q), dialer got %q, want %q", test.targetStr, got, test.want) - } - } -} - -func testResolverErrorPolling(t *testing.T, badUpdate func(*manual.Resolver), goodUpdate func(*manual.Resolver), dopts ...DialOption) { - boIter := make(chan int) - resolverBackoff := func(v int) time.Duration { - boIter <- v - return 0 - } - - r := manual.NewBuilderWithScheme("whatever") - rn := make(chan struct{}) - defer func() { close(rn) }() - r.ResolveNowCallback = func(resolver.ResolveNowOptions) { rn <- struct{}{} } - - defaultDialOptions := []DialOption{ - WithInsecure(), - WithResolvers(r), - withResolveNowBackoff(resolverBackoff), - } - cc, err := Dial(r.Scheme()+":///test.server", append(defaultDialOptions, dopts...)...) 
- if err != nil { - t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) - } - defer cc.Close() - badUpdate(r) - - panicAfter := time.AfterFunc(5*time.Second, func() { panic("timed out polling resolver") }) - defer panicAfter.Stop() - - // Ensure ResolveNow is called, then Backoff with the right parameter, several times - for i := 0; i < 7; i++ { - <-rn - if v := <-boIter; v != i { - t.Errorf("Backoff call %v uses value %v", i, v) - } - } - - // UpdateState will block if ResolveNow is being called (which blocks on - // rn), so call it in a goroutine. - goodUpdate(r) - - // Wait awhile to ensure ResolveNow and Backoff stop being called when the - // state is OK (i.e. polling was cancelled). - for { - t := time.NewTimer(50 * time.Millisecond) - select { - case <-rn: - // ClientConn is still calling ResolveNow - <-boIter - time.Sleep(5 * time.Millisecond) - continue - case <-t.C: - // ClientConn stopped calling ResolveNow; success - } - break - } -} - -const happyBalancerName = "happy balancer" - -func init() { - // Register a balancer that never returns an error from - // UpdateClientConnState, and doesn't do anything else either. - bf := stub.BalancerFuncs{ - UpdateClientConnState: func(*stub.BalancerData, balancer.ClientConnState) error { - return nil - }, - } - stub.Register(happyBalancerName, bf) -} - -// TestResolverErrorPolling injects resolver errors and verifies ResolveNow is -// called with the appropriate backoff strategy being consulted between -// ResolveNow calls. -func (s) TestResolverErrorPolling(t *testing.T) { - testResolverErrorPolling(t, func(r *manual.Resolver) { - r.CC.ReportError(errors.New("res err")) - }, func(r *manual.Resolver) { - // UpdateState will block if ResolveNow is being called (which blocks on - // rn), so call it in a goroutine. 
- go r.CC.UpdateState(resolver.State{}) - }, - WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, happyBalancerName))) -} - -// TestServiceConfigErrorPolling injects a service config error and verifies -// ResolveNow is called with the appropriate backoff strategy being consulted -// between ResolveNow calls. -func (s) TestServiceConfigErrorPolling(t *testing.T) { - testResolverErrorPolling(t, func(r *manual.Resolver) { - badsc := r.CC.ParseServiceConfig("bad config") - r.UpdateState(resolver.State{ServiceConfig: badsc}) - }, func(r *manual.Resolver) { - // UpdateState will block if ResolveNow is being called (which blocks on - // rn), so call it in a goroutine. - go r.CC.UpdateState(resolver.State{}) - }, - WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, happyBalancerName))) -} - -// TestResolverErrorInBuild makes the resolver.Builder call into the ClientConn -// during the Build call. We use two separate mutexes in the code which make -// sure there is no data race in this code path, and also that there is no -// deadlock. 
-func (s) TestResolverErrorInBuild(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - r.InitialState(resolver.State{ServiceConfig: &serviceconfig.ParseResult{Err: errors.New("resolver build err")}}) - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) - } - defer cc.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - var dummy int - const wantMsg = "error parsing service config" - const wantCode = codes.Unavailable - if err := cc.Invoke(ctx, "/foo/bar", &dummy, &dummy); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { - t.Fatalf("cc.Invoke(_, _, _, _) = %v; want status.Code()==%v, status.Message() contains %q", err, wantCode, wantMsg) - } -} - -func (s) TestServiceConfigErrorRPC(t *testing.T) { - r := manual.NewBuilderWithScheme("whatever") - - cc, err := Dial(r.Scheme()+":///test.server", WithInsecure(), WithResolvers(r)) - if err != nil { - t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) - } - defer cc.Close() - badsc := r.CC.ParseServiceConfig("bad config") - r.UpdateState(resolver.State{ServiceConfig: badsc}) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - var dummy int - const wantMsg = "error parsing service config" - const wantCode = codes.Unavailable - if err := cc.Invoke(ctx, "/foo/bar", &dummy, &dummy); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { - t.Fatalf("cc.Invoke(_, _, _, _) = %v; want status.Code()==%v, status.Message() contains %q", err, wantCode, wantMsg) - } -} diff --git a/resolver_test.go b/resolver_test.go new file mode 100644 index 000000000000..5b1e40c2a3dc --- /dev/null +++ b/resolver_test.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "net" + "testing" + + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/resolver" +) + +type wrapResolverBuilder struct { + resolver.Builder + scheme string +} + +func (w *wrapResolverBuilder) Scheme() string { + return w.scheme +} + +func init() { + resolver.Register(&wrapResolverBuilder{Builder: resolver.Get("passthrough"), scheme: "casetest"}) + resolver.Register(&wrapResolverBuilder{Builder: resolver.Get("dns"), scheme: "caseTest"}) +} + +func (s) TestResolverCaseSensitivity(t *testing.T) { + // This should find the "casetest" resolver instead of the "caseTest" + // resolver, even though the latter was registered later. "casetest" is + // "passthrough" and "caseTest" is "dns". With "passthrough" the dialer + // should see the target's address directly, but "dns" would be converted + // into a loopback IP (v4 or v6) address. 
+ target := "caseTest:///localhost:1234" + addrCh := make(chan string, 1) + customDialer := func(ctx context.Context, addr string) (net.Conn, error) { + select { + case addrCh <- addr: + default: + } + return nil, fmt.Errorf("not dialing with custom dialer") + } + + cc, err := Dial(target, WithContextDialer(customDialer), WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Unexpected Dial(%q) error: %v", target, err) + } + cc.Connect() + if got, want := <-addrCh, "localhost:1234"; got != want { + cc.Close() + t.Fatalf("Dialer got address %q; wanted %q", got, want) + } + cc.Close() + + // Clear addrCh for future use. + select { + case <-addrCh: + default: + } + + res := &wrapResolverBuilder{Builder: resolver.Get("dns"), scheme: "caseTest2"} + // This should not find the injected resolver due to the case not matching. + // This results in "passthrough" being used with the address as the whole + // target. + target = "caseTest2:///localhost:1234" + cc, err = Dial(target, WithContextDialer(customDialer), WithResolvers(res), WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Unexpected Dial(%q) error: %v", target, err) + } + cc.Connect() + if got, want := <-addrCh, target; got != want { + cc.Close() + t.Fatalf("Dialer got address %q; wanted %q", got, want) + } + cc.Close() +} diff --git a/rpc_util.go b/rpc_util.go index c0a1208f2f30..a844d28f49d0 100644 --- a/rpc_util.go +++ b/rpc_util.go @@ -25,7 +25,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "math" "strings" "sync" @@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { return &gzipCompressor{ pool: sync.Pool{ New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } @@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return ioutil.ReadAll(z) + return 
io.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -160,6 +159,7 @@ type callInfo struct { contentSubtype string codec baseCodec maxRetryRPCBufferSize int + onFinish []func(err error) } func defaultCallInfo() *callInfo { @@ -198,7 +198,7 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -220,7 +220,7 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -242,7 +242,7 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -258,7 +258,8 @@ func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { } // WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false, the RPC will fail +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail // immediately. Otherwise, the RPC client will block the call until a // connection is available (or the call is canceled or times out) and will // retry the call if it fails due to a transient error. 
gRPC will not retry if @@ -281,7 +282,7 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -295,8 +296,44 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. This is mainly used to be used by streaming interceptors, to be +// notified when the RPC completes along with information about the status of +// the RPC. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is CallOption that indicates a callback to be called when +// the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} + // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive. +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. 
func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } @@ -304,7 +341,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -319,7 +356,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } @@ -327,7 +365,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -350,7 +388,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -368,7 +406,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -378,7 +416,7 @@ func UseCompressor(name string) CallOption { // CompressorCallOption is a CallOption that indicates the compressor to use. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -415,7 +453,7 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -429,9 +467,10 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { } func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} -// ForceCodec returns a CallOption that will set the given Codec to be -// used for all request and response messages for a call. The result of calling -// String() will be used as the content-subtype in a case-insensitive manner. +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. // // See Content-Type on // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for @@ -442,7 +481,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -453,7 +492,7 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. 
// -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -478,7 +517,7 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -495,7 +534,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -506,7 +545,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -538,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -546,10 +588,11 @@ type parser struct { // format. The caller owns the returned msg memory. 
// // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. @@ -570,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -654,12 +695,13 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - SentTime: t, + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + CompressedLength: len(payload), + SentTime: t, } } @@ -680,17 +722,17 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool } type payloadInfo struct { - wireLength int // The compressed length got from wire. + compressedLength int // The compressed length got from wire. 
uncompressedBytes []byte } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.wireLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -702,23 +744,21 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } - } else { - size = len(d) - } - if size > maxReceiveMessageSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. 
@@ -745,7 +785,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } @@ -753,15 +793,17 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + if err := c.Unmarshal(buf, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } @@ -827,33 +869,45 @@ func Errorf(c codes.Code, format string, a ...interface{}) error { // toRPCErr converts an error into an error from the status package. 
func toRPCErr(err error) error { - if err == nil || err == io.EOF { + switch err { + case nil, io.EOF: return err - } - if err == io.ErrUnexpectedEOF { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } - if _, ok := status.FromError(err); ok { - return err - } + switch e := err.(type) { case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) - default: - switch err { - case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return status.Error(codes.Canceled, err.Error()) - } + case *transport.NewStreamError: + return toRPCErr(e.Err) } + + if _, ok := status.FromError(err); ok { + return err + } + return status.Error(codes.Unknown, err.Error()) } // setCallInfoCodec should only be called after CallOptions have been applied. func setCallInfoCodec(c *callInfo) error { if c.codec != nil { - // codec was already set by a CallOption; use it. + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. 
+ if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } return nil } diff --git a/rpc_util_test.go b/rpc_util_test.go index 90912d52a226..84f2348655b9 100644 --- a/rpc_util_test.go +++ b/rpc_util_test.go @@ -65,7 +65,7 @@ func (s) TestSimpleParsing(t *testing.T) { {append([]byte{0, 1, 0, 0, 0}, bigMsg...), nil, bigMsg, compressionNone}, } { buf := fullReader{bytes.NewReader(test.p)} - parser := &parser{r: buf} + parser := &parser{r: buf, recvBufferPool: nopBufferPool{}} pt, b, err := parser.recvMsg(math.MaxInt32) if err != test.err || !bytes.Equal(b, test.b) || pt != test.pt { t.Fatalf("parser{%v}.recvMsg(_) = %v, %v, %v\nwant %v, %v, %v", test.p, pt, b, err, test.pt, test.b, test.err) @@ -77,7 +77,7 @@ func (s) TestMultipleParsing(t *testing.T) { // Set a byte stream consists of 3 messages with their headers. p := []byte{0, 0, 0, 0, 1, 'a', 0, 0, 0, 0, 2, 'b', 'c', 0, 0, 0, 0, 1, 'd'} b := fullReader{bytes.NewReader(p)} - parser := &parser{r: b} + parser := &parser{r: b, recvBufferPool: nopBufferPool{}} wantRecvs := []struct { pt payloadFormat diff --git a/security/advancedtls/advancedtls.go b/security/advancedtls/advancedtls.go index 534a3ed417ba..4b5d1f4825c9 100644 --- a/security/advancedtls/advancedtls.go +++ b/security/advancedtls/advancedtls.go @@ -181,6 +181,18 @@ type ClientOptions struct { RootOptions RootCertificateOptions // VType is the verification type on the client side. VType VerificationType + // RevocationConfig is the configurations for certificate revocation checks. + // It could be nil if such checks are not needed. + RevocationConfig *RevocationConfig + // MinVersion contains the minimum TLS version that is acceptable. + // By default, TLS 1.2 is currently used as the minimum when acting as a + // client, and TLS 1.0 when acting as a server. TLS 1.0 is the minimum + // supported by this package, both as a client and as a server. 
+ MinVersion uint16 + // MaxVersion contains the maximum TLS version that is acceptable. + // By default, the maximum version supported by this package is used, + // which is currently TLS 1.3. + MaxVersion uint16 } // ServerOptions contains the fields needed to be filled by the server. @@ -199,6 +211,18 @@ type ServerOptions struct { RequireClientCert bool // VType is the verification type on the server side. VType VerificationType + // RevocationConfig is the configurations for certificate revocation checks. + // It could be nil if such checks are not needed. + RevocationConfig *RevocationConfig + // MinVersion contains the minimum TLS version that is acceptable. + // By default, TLS 1.2 is currently used as the minimum when acting as a + // client, and TLS 1.0 when acting as a server. TLS 1.0 is the minimum + // supported by this package, both as a client and as a server. + MinVersion uint16 + // MaxVersion contains the maximum TLS version that is acceptable. + // By default, the maximum version supported by this package is used, + // which is currently TLS 1.3. + MaxVersion uint16 } func (o *ClientOptions) config() (*tls.Config, error) { @@ -216,11 +240,16 @@ func (o *ClientOptions) config() (*tls.Config, error) { if o.IdentityOptions.GetIdentityCertificatesForServer != nil { return nil, fmt.Errorf("GetIdentityCertificatesForServer cannot be specified on the client side") } + if o.MinVersion > o.MaxVersion { + return nil, fmt.Errorf("the minimum TLS version is larger than the maximum TLS version") + } config := &tls.Config{ ServerName: o.ServerNameOverride, // We have to set InsecureSkipVerify to true to skip the default checks and // use the verification function we built from buildVerifyFunc. InsecureSkipVerify: true, + MinVersion: o.MinVersion, + MaxVersion: o.MaxVersion, } // Propagate root-certificate-related fields in tls.Config. 
switch { @@ -287,6 +316,9 @@ func (o *ServerOptions) config() (*tls.Config, error) { if o.IdentityOptions.GetIdentityCertificatesForClient != nil { return nil, fmt.Errorf("GetIdentityCertificatesForClient cannot be specified on the server side") } + if o.MinVersion > o.MaxVersion { + return nil, fmt.Errorf("the minimum TLS version is larger than the maximum TLS version") + } clientAuth := tls.NoClientCert if o.RequireClientCert { // We have to set clientAuth to RequireAnyClientCert to force underlying @@ -296,6 +328,8 @@ func (o *ServerOptions) config() (*tls.Config, error) { } config := &tls.Config{ ClientAuth: clientAuth, + MinVersion: o.MinVersion, + MaxVersion: o.MaxVersion, } // Propagate root-certificate-related fields in tls.Config. switch { @@ -356,11 +390,12 @@ func (o *ServerOptions) config() (*tls.Config, error) { // advancedTLSCreds is the credentials required for authenticating a connection // using TLS. type advancedTLSCreds struct { - config *tls.Config - verifyFunc CustomVerificationFunc - getRootCAs func(params *GetRootCAsParams) (*GetRootCAsResults, error) - isClient bool - vType VerificationType + config *tls.Config + verifyFunc CustomVerificationFunc + getRootCAs func(params *GetRootCAsParams) (*GetRootCAsResults, error) + isClient bool + vType VerificationType + revocationConfig *RevocationConfig } func (c advancedTLSCreds) Info() credentials.ProtocolInfo { @@ -442,15 +477,23 @@ func (c *advancedTLSCreds) OverrideServerName(serverNameOverride string) error { // and possibly custom verification check. // We have to build our own verification function here because current // tls module: -// 1. does not have a good support on root cert reloading. -// 2. will ignore basic certificate check when setting InsecureSkipVerify -// to true. +// 1. does not have a good support on root cert reloading. +// 2. will ignore basic certificate check when setting InsecureSkipVerify +// to true. 
func buildVerifyFunc(c *advancedTLSCreds, serverName string, rawConn net.Conn) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { chains := verifiedChains var leafCert *x509.Certificate + rawCertList := make([]*x509.Certificate, len(rawCerts)) + for i, asn1Data := range rawCerts { + cert, err := x509.ParseCertificate(asn1Data) + if err != nil { + return err + } + rawCertList[i] = cert + } if c.vType == CertAndHostVerification || c.vType == CertVerification { // perform possible trust credential reloading and certificate check rootCAs := c.config.RootCAs @@ -469,14 +512,6 @@ func buildVerifyFunc(c *advancedTLSCreds, rootCAs = results.TrustCerts } // Verify peers' certificates against RootCAs and get verifiedChains. - certs := make([]*x509.Certificate, len(rawCerts)) - for i, asn1Data := range rawCerts { - cert, err := x509.ParseCertificate(asn1Data) - if err != nil { - return err - } - certs[i] = cert - } keyUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} if !c.isClient { keyUsages = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} @@ -487,7 +522,7 @@ func buildVerifyFunc(c *advancedTLSCreds, Intermediates: x509.NewCertPool(), KeyUsages: keyUsages, } - for _, cert := range certs[1:] { + for _, cert := range rawCertList[1:] { opts.Intermediates.AddCert(cert) } // Perform default hostname check if specified. @@ -501,11 +536,21 @@ func buildVerifyFunc(c *advancedTLSCreds, opts.DNSName = parsedName } var err error - chains, err = certs[0].Verify(opts) + chains, err = rawCertList[0].Verify(opts) if err != nil { return err } - leafCert = certs[0] + leafCert = rawCertList[0] + } + // Perform certificate revocation check if specified. 
+ if c.revocationConfig != nil { + verifiedChains := chains + if verifiedChains == nil { + verifiedChains = [][]*x509.Certificate{rawCertList} + } + if err := CheckChainRevocation(verifiedChains, *c.revocationConfig); err != nil { + return err + } } // Perform custom verification check if specified. if c.verifyFunc != nil { @@ -529,11 +574,12 @@ func NewClientCreds(o *ClientOptions) (credentials.TransportCredentials, error) return nil, err } tc := &advancedTLSCreds{ - config: conf, - isClient: true, - getRootCAs: o.RootOptions.GetRootCertificates, - verifyFunc: o.VerifyPeer, - vType: o.VType, + config: conf, + isClient: true, + getRootCAs: o.RootOptions.GetRootCertificates, + verifyFunc: o.VerifyPeer, + vType: o.VType, + revocationConfig: o.RevocationConfig, } tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) return tc, nil @@ -547,11 +593,12 @@ func NewServerCreds(o *ServerOptions) (credentials.TransportCredentials, error) return nil, err } tc := &advancedTLSCreds{ - config: conf, - isClient: false, - getRootCAs: o.RootOptions.GetRootCertificates, - verifyFunc: o.VerifyPeer, - vType: o.VType, + config: conf, + isClient: false, + getRootCAs: o.RootOptions.GetRootCertificates, + verifyFunc: o.VerifyPeer, + vType: o.VType, + revocationConfig: o.RevocationConfig, } tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) return tc, nil diff --git a/security/advancedtls/advancedtls_integration_test.go b/security/advancedtls/advancedtls_integration_test.go index 4bb9e645b0a1..3659497fd5d2 100644 --- a/security/advancedtls/advancedtls_integration_test.go +++ b/security/advancedtls/advancedtls_integration_test.go @@ -23,7 +23,6 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "os" "sync" @@ -42,8 +41,6 @@ import ( const ( // Default timeout for normal connections. defaultTestTimeout = 5 * time.Second - // Default timeout for failed connections. 
- defaultTestShortTimeout = 10 * time.Millisecond // Intervals that set to monitor the credential updates. credRefreshingInterval = 200 * time.Millisecond // Time we wait for the credential updates to be picked up. @@ -308,7 +305,7 @@ func (s) TestEnd2End(t *testing.T) { // The mutual authentication works at the beginning, since ClientCert1 // trusted by ServerTrust1, ServerCert1 by ClientTrust1, and also the // custom verification check on server side allows all connections. - // At stage 1, server disallows the the connections by setting custom + // At stage 1, server disallows the connections by setting custom // verification check. The following calls should fail. Previous // connections should not be affected. // At stage 2, server allows all the connections again and the @@ -380,7 +377,7 @@ func (s) TestEnd2End(t *testing.T) { } clientTLSCreds, err := NewClientCreds(clientOptions) if err != nil { - t.Fatalf("clientTLSCreds failed to create") + t.Fatalf("clientTLSCreds failed to create: %v", err) } // ------------------------Scenario 1------------------------------------ // stage = 0, initial connection should succeed @@ -401,18 +398,19 @@ func (s) TestEnd2End(t *testing.T) { } // ------------------------Scenario 3------------------------------------ // stage = 1, new connection should fail - shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - conn2, greetClient, err := callAndVerifyWithClientConn(shortCtx, addr, "rpc call 3", clientTLSCreds, true) + ctx2, cancel2 := context.WithTimeout(context.Background(), defaultTestTimeout) + conn2, _, err := callAndVerifyWithClientConn(ctx2, addr, "rpc call 3", clientTLSCreds, true) if err != nil { t.Fatal(err) } defer conn2.Close() + // Immediately cancel the context so the dialing won't drag the entire timeout until it stops. 
+ cancel2() // ---------------------------------------------------------------------- stage.increase() // ------------------------Scenario 4------------------------------------ // stage = 2, new connection should succeed - conn3, greetClient, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 4", clientTLSCreds, false) + conn3, _, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 4", clientTLSCreds, false) if err != nil { t.Fatal(err) } @@ -436,27 +434,27 @@ type tmpCredsFiles struct { func createTmpFiles() (*tmpCredsFiles, error) { tmpFiles := &tmpCredsFiles{} var err error - tmpFiles.clientCertTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.clientCertTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } - tmpFiles.clientKeyTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.clientKeyTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } - tmpFiles.clientTrustTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.clientTrustTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } - tmpFiles.serverCertTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.serverCertTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } - tmpFiles.serverKeyTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.serverKeyTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } - tmpFiles.serverTrustTmp, err = ioutil.TempFile(os.TempDir(), "pre-") + tmpFiles.serverTrustTmp, err = os.CreateTemp(os.TempDir(), "pre-") if err != nil { return nil, err } @@ -496,11 +494,11 @@ func (tmpFiles *tmpCredsFiles) removeFiles() { } func copyFileContents(sourceFile, destinationFile string) error { - input, err := ioutil.ReadFile(sourceFile) + input, err := os.ReadFile(sourceFile) if err != nil { return err } - err = ioutil.WriteFile(destinationFile, input, 0644) + err = os.WriteFile(destinationFile, input, 0644) if err != nil { 
return err } @@ -555,7 +553,7 @@ func createProviders(tmpFiles *tmpCredsFiles) (certprovider.Provider, certprovid // Next, we change the identity certs that IdentityProvider is watching. Since // the identity key is not changed, the IdentityProvider should ignore the // update, and the connection should still be good. -// Then the the identity key is changed. This time IdentityProvider should pick +// Then the identity key is changed. This time IdentityProvider should pick // up the update, and the connection should fail, due to the trust certs on the // other side is not changed. // Finally, the trust certs that other-side's RootProvider is watching get @@ -692,7 +690,7 @@ func (s) TestPEMFileProviderEnd2End(t *testing.T) { } // New connections should still be good, because the Provider didn't pick // up the changes due to key-cert mismatch. - conn2, greetClient, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 3", clientTLSCreds, false) + conn2, _, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 3", clientTLSCreds, false) if err != nil { t.Fatal(err) } @@ -704,20 +702,21 @@ func (s) TestPEMFileProviderEnd2End(t *testing.T) { // New connections should fail now, because the Provider picked the // change, and *_cert_2.pem is not trusted by *_trust_cert_1.pem on the // other side. - shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer shortCancel() - conn3, greetClient, err := callAndVerifyWithClientConn(shortCtx, addr, "rpc call 4", clientTLSCreds, true) + ctx2, cancel2 := context.WithTimeout(context.Background(), defaultTestTimeout) + conn3, _, err := callAndVerifyWithClientConn(ctx2, addr, "rpc call 4", clientTLSCreds, true) if err != nil { t.Fatal(err) } defer conn3.Close() + // Immediately cancel the context so the dialing won't drag the entire timeout until it stops. + cancel2() // Make the trust cert change on the other side, and wait 1 second for // the provider to pick up the change. 
test.trustCertUpdateFunc() time.Sleep(sleepInterval) // New connections should be good, because the other side is using // *_trust_cert_2.pem now. - conn4, greetClient, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 5", clientTLSCreds, false) + conn4, _, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 5", clientTLSCreds, false) if err != nil { t.Fatal(err) } @@ -732,13 +731,12 @@ func (s) TestDefaultHostNameCheck(t *testing.T) { t.Fatalf("cs.LoadCerts() failed, err: %v", err) } for _, test := range []struct { - desc string - clientRoot *x509.CertPool - clientVerifyFunc CustomVerificationFunc - clientVType VerificationType - serverCert []tls.Certificate - serverVType VerificationType - expectError bool + desc string + clientRoot *x509.CertPool + clientVType VerificationType + serverCert []tls.Certificate + serverVType VerificationType + expectError bool }{ // Client side sets vType to CertAndHostVerification, and will do // default hostname check. Server uses a cert without "localhost" or @@ -788,7 +786,6 @@ func (s) TestDefaultHostNameCheck(t *testing.T) { pb.RegisterGreeterServer(s, greeterServer{}) go s.Serve(lis) clientOptions := &ClientOptions{ - VerifyPeer: test.clientVerifyFunc, RootOptions: RootCertificateOptions{ RootCACerts: test.clientRoot, }, @@ -796,7 +793,149 @@ func (s) TestDefaultHostNameCheck(t *testing.T) { } clientTLSCreds, err := NewClientCreds(clientOptions) if err != nil { - t.Fatalf("clientTLSCreds failed to create") + t.Fatalf("clientTLSCreds failed to create: %v", err) + } + shouldFail := false + if test.expectError { + shouldFail = true + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + conn, _, err := callAndVerifyWithClientConn(ctx, addr, "rpc call 1", clientTLSCreds, shouldFail) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + }) + } +} + +func (s) TestTLSVersions(t *testing.T) { + cs := &testutils.CertStore{} + if err := cs.LoadCerts(); err != nil { + 
t.Fatalf("cs.LoadCerts() failed, err: %v", err) + } + for _, test := range []struct { + desc string + expectError bool + clientMinVersion uint16 + clientMaxVersion uint16 + serverMinVersion uint16 + serverMaxVersion uint16 + }{ + // Client side sets TLS version that is higher than required from the server side. + { + desc: "Client TLS version higher than server", + clientMinVersion: tls.VersionTLS13, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS12, + expectError: true, + }, + // Server side sets TLS version that is higher than required from the client side. + { + desc: "Server TLS version higher than client", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS12, + serverMinVersion: tls.VersionTLS13, + serverMaxVersion: tls.VersionTLS13, + expectError: true, + }, + // Client and server set proper TLS versions. + { + desc: "Good TLS version settings", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS13, + expectError: false, + }, + { + desc: "Client 1.2 - 1.3 and server 1.2", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS12, + expectError: false, + }, + { + desc: "Client 1.2 - 1.3 and server 1.1 - 1.2", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS11, + serverMaxVersion: tls.VersionTLS12, + expectError: false, + }, + { + desc: "Client 1.2 - 1.3 and server 1.3", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS13, + serverMaxVersion: tls.VersionTLS13, + expectError: false, + }, + { + desc: "Client 1.2 - 1.2 and server 1.2 - 1.3", + clientMinVersion: tls.VersionTLS12, + clientMaxVersion: tls.VersionTLS12, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: 
tls.VersionTLS13, + expectError: false, + }, + { + desc: "Client 1.1 - 1.2 and server 1.2 - 1.3", + clientMinVersion: tls.VersionTLS11, + clientMaxVersion: tls.VersionTLS12, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS13, + expectError: false, + }, + { + desc: "Client 1.3 and server 1.2 - 1.3", + clientMinVersion: tls.VersionTLS13, + clientMaxVersion: tls.VersionTLS13, + serverMinVersion: tls.VersionTLS12, + serverMaxVersion: tls.VersionTLS13, + expectError: false, + }, + } { + test := test + t.Run(test.desc, func(t *testing.T) { + // Start a server using ServerOptions in another goroutine. + serverOptions := &ServerOptions{ + IdentityOptions: IdentityCertificateOptions{ + Certificates: []tls.Certificate{cs.ServerPeerLocalhost1}, + }, + RequireClientCert: false, + VType: CertAndHostVerification, + MinVersion: test.serverMinVersion, + MaxVersion: test.serverMaxVersion, + } + serverTLSCreds, err := NewServerCreds(serverOptions) + if err != nil { + t.Fatalf("failed to create server creds: %v", err) + } + s := grpc.NewServer(grpc.Creds(serverTLSCreds)) + defer s.Stop() + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to listen: %v", err) + } + defer lis.Close() + addr := fmt.Sprintf("localhost:%v", lis.Addr().(*net.TCPAddr).Port) + pb.RegisterGreeterServer(s, greeterServer{}) + go s.Serve(lis) + clientOptions := &ClientOptions{ + RootOptions: RootCertificateOptions{ + RootCACerts: cs.ClientTrust1, + }, + VType: CertAndHostVerification, + MinVersion: test.clientMinVersion, + MaxVersion: test.clientMaxVersion, + } + clientTLSCreds, err := NewClientCreds(clientOptions) + if err != nil { + t.Fatalf("clientTLSCreds failed to create: %v", err) } shouldFail := false if test.expectError { diff --git a/security/advancedtls/advancedtls_test.go b/security/advancedtls/advancedtls_test.go index 64da81a1700c..afad25e7cb4b 100644 --- a/security/advancedtls/advancedtls_test.go +++ b/security/advancedtls/advancedtls_test.go 
@@ -27,10 +27,12 @@ import ( "net" "testing" + lru "github.com/hashicorp/golang-lru" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/security/advancedtls/internal/testutils" + "google.golang.org/grpc/security/advancedtls/testdata" ) type s struct { @@ -89,6 +91,8 @@ func (s) TestClientOptionsConfigErrorCases(t *testing.T) { clientVType VerificationType IdentityOptions IdentityCertificateOptions RootOptions RootCertificateOptions + MinVersion uint16 + MaxVersion uint16 }{ { desc: "Skip default verification and provide no root credentials", @@ -120,6 +124,11 @@ func (s) TestClientOptionsConfigErrorCases(t *testing.T) { }, }, }, + { + desc: "Invalid min/max TLS versions", + MinVersion: tls.VersionTLS13, + MaxVersion: tls.VersionTLS12, + }, } for _, test := range tests { test := test @@ -128,6 +137,8 @@ func (s) TestClientOptionsConfigErrorCases(t *testing.T) { VType: test.clientVType, IdentityOptions: test.IdentityOptions, RootOptions: test.RootOptions, + MinVersion: test.MinVersion, + MaxVersion: test.MaxVersion, } _, err := clientOptions.config() if err == nil { @@ -143,6 +154,8 @@ func (s) TestClientOptionsConfigSuccessCases(t *testing.T) { clientVType VerificationType IdentityOptions IdentityCertificateOptions RootOptions RootCertificateOptions + MinVersion uint16 + MaxVersion uint16 }{ { desc: "Use system default if no fields in RootCertificateOptions is specified", @@ -157,6 +170,8 @@ func (s) TestClientOptionsConfigSuccessCases(t *testing.T) { IdentityOptions: IdentityCertificateOptions{ IdentityProvider: fakeProvider{pt: provTypeIdentity}, }, + MinVersion: tls.VersionTLS12, + MaxVersion: tls.VersionTLS13, }, } for _, test := range tests { @@ -166,6 +181,8 @@ func (s) TestClientOptionsConfigSuccessCases(t *testing.T) { VType: test.clientVType, IdentityOptions: test.IdentityOptions, RootOptions: test.RootOptions, + MinVersion: 
test.MinVersion, + MaxVersion: test.MaxVersion, } clientConfig, err := clientOptions.config() if err != nil { @@ -190,6 +207,8 @@ func (s) TestServerOptionsConfigErrorCases(t *testing.T) { serverVType VerificationType IdentityOptions IdentityCertificateOptions RootOptions RootCertificateOptions + MinVersion uint16 + MaxVersion uint16 }{ { desc: "Skip default verification and provide no root credentials", @@ -227,6 +246,11 @@ func (s) TestServerOptionsConfigErrorCases(t *testing.T) { }, }, }, + { + desc: "Invalid min/max TLS versions", + MinVersion: tls.VersionTLS13, + MaxVersion: tls.VersionTLS12, + }, } for _, test := range tests { test := test @@ -236,6 +260,8 @@ func (s) TestServerOptionsConfigErrorCases(t *testing.T) { RequireClientCert: test.requireClientCert, IdentityOptions: test.IdentityOptions, RootOptions: test.RootOptions, + MinVersion: test.MinVersion, + MaxVersion: test.MaxVersion, } _, err := serverOptions.config() if err == nil { @@ -252,6 +278,8 @@ func (s) TestServerOptionsConfigSuccessCases(t *testing.T) { serverVType VerificationType IdentityOptions IdentityCertificateOptions RootOptions RootCertificateOptions + MinVersion uint16 + MaxVersion uint16 }{ { desc: "Use system default if no fields in RootCertificateOptions is specified", @@ -273,6 +301,8 @@ func (s) TestServerOptionsConfigSuccessCases(t *testing.T) { return nil, nil }, }, + MinVersion: tls.VersionTLS12, + MaxVersion: tls.VersionTLS13, }, } for _, test := range tests { @@ -283,6 +313,8 @@ func (s) TestServerOptionsConfigSuccessCases(t *testing.T) { RequireClientCert: test.requireClientCert, IdentityOptions: test.IdentityOptions, RootOptions: test.RootOptions, + MinVersion: test.MinVersion, + MaxVersion: test.MaxVersion, } serverConfig, err := serverOptions.config() if err != nil { @@ -339,6 +371,10 @@ func (s) TestClientServerHandshake(t *testing.T) { getRootCAsForServerBad := func(params *GetRootCAsParams) (*GetRootCAsResults, error) { return nil, fmt.Errorf("bad root certificate 
reloading") } + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } for _, test := range []struct { desc string clientCert []tls.Certificate @@ -349,6 +385,7 @@ func (s) TestClientServerHandshake(t *testing.T) { clientVType VerificationType clientRootProvider certprovider.Provider clientIdentityProvider certprovider.Provider + clientRevocationConfig *RevocationConfig clientExpectHandshakeError bool serverMutualTLS bool serverCert []tls.Certificate @@ -359,6 +396,7 @@ func (s) TestClientServerHandshake(t *testing.T) { serverVType VerificationType serverRootProvider certprovider.Provider serverIdentityProvider certprovider.Provider + serverRevocationConfig *RevocationConfig serverExpectError bool }{ // Client: nil setting except verifyFuncGood @@ -642,6 +680,30 @@ func (s) TestClientServerHandshake(t *testing.T) { serverRootProvider: fakeProvider{isClient: false}, serverVType: CertVerification, }, + // Client: set valid credentials with the revocation config + // Server: set valid credentials with the revocation config + // Expected Behavior: success, because none of the certificate chains sent in the connection are revoked + { + desc: "Client sets peer cert, reload root function with verifyFuncGood; Server sets peer cert, reload root function; mutualTLS", + clientCert: []tls.Certificate{cs.ClientCert1}, + clientGetRoot: getRootCAsForClient, + clientVerifyFunc: clientVerifyFuncGood, + clientVType: CertVerification, + clientRevocationConfig: &RevocationConfig{ + RootDir: testdata.Path("crl"), + AllowUndetermined: true, + Cache: cache, + }, + serverMutualTLS: true, + serverCert: []tls.Certificate{cs.ServerCert1}, + serverGetRoot: getRootCAsForServer, + serverVType: CertVerification, + serverRevocationConfig: &RevocationConfig{ + RootDir: testdata.Path("crl"), + AllowUndetermined: true, + Cache: cache, + }, + }, } { test := test t.Run(test.desc, func(t *testing.T) { @@ -665,6 +727,7 @@ func (s) TestClientServerHandshake(t *testing.T) { 
RequireClientCert: test.serverMutualTLS, VerifyPeer: test.serverVerifyFunc, VType: test.serverVType, + RevocationConfig: test.serverRevocationConfig, } go func(done chan credentials.AuthInfo, lis net.Listener, serverOptions *ServerOptions) { serverRawConn, err := lis.Accept() @@ -706,7 +769,8 @@ func (s) TestClientServerHandshake(t *testing.T) { GetRootCertificates: test.clientGetRoot, RootProvider: test.clientRootProvider, }, - VType: test.clientVType, + VType: test.clientVType, + RevocationConfig: test.clientRevocationConfig, } clientTLS, err := NewClientCreds(clientOptions) if err != nil { diff --git a/security/advancedtls/crl.go b/security/advancedtls/crl.go new file mode 100644 index 000000000000..bb490a5c8dba --- /dev/null +++ b/security/advancedtls/crl.go @@ -0,0 +1,548 @@ +// TODO(@gregorycooke) - Remove when only golang 1.19+ is supported +//go:build go1.19 + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package advancedtls + +import ( + "bytes" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/crypto/cryptobyte" + cbasn1 "golang.org/x/crypto/cryptobyte/asn1" + "google.golang.org/grpc/grpclog" +) + +var grpclogLogger = grpclog.Component("advancedtls") + +// Cache is an interface to cache CRL files. 
+// The cache implementation must be concurrency safe. +// A fixed size lru cache from golang-lru is recommended. +type Cache interface { + // Add adds a value to the cache. + Add(key, value interface{}) bool + // Get looks up a key's value from the cache. + Get(key interface{}) (value interface{}, ok bool) +} + +// RevocationConfig contains options for CRL lookup. +type RevocationConfig struct { + // RootDir is the directory to search for CRL files. + // Directory format must match OpenSSL X509_LOOKUP_hash_dir(3). + RootDir string + // AllowUndetermined controls if certificate chains with RevocationUndetermined + // revocation status are allowed to complete. + AllowUndetermined bool + // Cache will store CRL files if not nil, otherwise files are reloaded for every lookup. + Cache Cache +} + +// RevocationStatus is the revocation status for a certificate or chain. +type RevocationStatus int + +const ( + // RevocationUndetermined means we couldn't find or verify a CRL for the cert. + RevocationUndetermined RevocationStatus = iota + // RevocationUnrevoked means we found the CRL for the cert and the cert is not revoked. + RevocationUnrevoked + // RevocationRevoked means we found the CRL and the cert is revoked. + RevocationRevoked +) + +func (s RevocationStatus) String() string { + return [...]string{"RevocationUndetermined", "RevocationUnrevoked", "RevocationRevoked"}[s] +} + +// certificateListExt contains a pkix.CertificateList and parsed +// extensions that aren't provided by the golang CRL parser. +type certificateListExt struct { + CertList *x509.RevocationList + // RFC5280, 5.2.1, all conforming CRLs must have a AKID with the ID method. 
+ AuthorityKeyID []byte + RawIssuer []byte +} + +const tagDirectoryName = 4 + +var ( + // RFC5280, 5.2.4 id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } + oidDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27} + // RFC5280, 5.2.5 id-ce-issuingDistributionPoint OBJECT IDENTIFIER ::= { id-ce 28 } + oidIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28} + // RFC5280, 5.3.3 id-ce-certificateIssuer OBJECT IDENTIFIER ::= { id-ce 29 } + oidCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29} + // RFC5290, 4.2.1.1 id-ce-authorityKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 35 } + oidAuthorityKeyIdentifier = asn1.ObjectIdentifier{2, 5, 29, 35} +) + +// x509NameHash implements the OpenSSL X509_NAME_hash function for hashed directory lookups. +// +// NOTE: due to the behavior of asn1.Marshal, if the original encoding of the RDN sequence +// contains strings which do not use the ASN.1 PrintableString type, the name will not be +// re-encoded using those types, resulting in a hash which does not match that produced +// by OpenSSL. +func x509NameHash(r pkix.RDNSequence) string { + var canonBytes []byte + // First, canonicalize all the strings. + for _, rdnSet := range r { + for i, rdn := range rdnSet { + value, ok := rdn.Value.(string) + if !ok { + continue + } + // OpenSSL trims all whitespace, does a tolower, and removes extra spaces between words. + // Implemented in x509_name_canon in OpenSSL + canonStr := strings.Join(strings.Fields( + strings.TrimSpace(strings.ToLower(value))), " ") + // Then it changes everything to UTF8 strings + rdnSet[i].Value = asn1.RawValue{Tag: asn1.TagUTF8String, Bytes: []byte(canonStr)} + + } + } + + // Finally, OpenSSL drops the initial sequence tag + // so we marshal all the RDNs separately instead of as a group. + for _, canonRdn := range r { + b, err := asn1.Marshal(canonRdn) + if err != nil { + continue + } + canonBytes = append(canonBytes, b...) 
+ } + + issuerHash := sha1.Sum(canonBytes) + // Openssl takes the first 4 bytes and encodes them as a little endian + // uint32 and then uses the hex to make the file name. + // In C++, this would be: + // (((unsigned long)md[0]) | ((unsigned long)md[1] << 8L) | + // ((unsigned long)md[2] << 16L) | ((unsigned long)md[3] << 24L) + // ) & 0xffffffffL; + fileHash := binary.LittleEndian.Uint32(issuerHash[0:4]) + return fmt.Sprintf("%08x", fileHash) +} + +// CheckRevocation checks the connection for revoked certificates based on RFC5280. +// This implementation has the following major limitations: +// - Indirect CRL files are not supported. +// - CRL loading is only supported from directories in the X509_LOOKUP_hash_dir format. +// - OnlySomeReasons is not supported. +// - Delta CRL files are not supported. +// - Certificate CRLDistributionPoint must be URLs, but are then ignored and converted into a file path. +// - CRL checks are done after path building, which goes against RFC4158. +func CheckRevocation(conn tls.ConnectionState, cfg RevocationConfig) error { + return CheckChainRevocation(conn.VerifiedChains, cfg) +} + +// CheckChainRevocation checks the verified certificate chain +// for revoked certificates based on RFC5280. +func CheckChainRevocation(verifiedChains [][]*x509.Certificate, cfg RevocationConfig) error { + // Iterate the verified chains looking for one that is RevocationUnrevoked. + // A single RevocationUnrevoked chain is enough to allow the connection, and a single RevocationRevoked + // chain does not mean the connection should fail. + count := make(map[RevocationStatus]int) + for _, chain := range verifiedChains { + switch checkChain(chain, cfg) { + case RevocationUnrevoked: + // If any chain is RevocationUnrevoked then return no error. + return nil + case RevocationRevoked: + // If this chain is revoked, keep looking for another chain. 
+ count[RevocationRevoked]++ + continue + case RevocationUndetermined: + if cfg.AllowUndetermined { + return nil + } + count[RevocationUndetermined]++ + continue + } + } + return fmt.Errorf("no unrevoked chains found: %v", count) +} + +// checkChain will determine and check all certificates in chain against the CRL +// defined in the certificate with the following rules: +// 1. If any certificate is RevocationRevoked, return RevocationRevoked. +// 2. If any certificate is RevocationUndetermined, return RevocationUndetermined. +// 3. If all certificates are RevocationUnrevoked, return RevocationUnrevoked. +func checkChain(chain []*x509.Certificate, cfg RevocationConfig) RevocationStatus { + chainStatus := RevocationUnrevoked + for _, c := range chain { + switch checkCert(c, chain, cfg) { + case RevocationRevoked: + // Easy case, if a cert in the chain is revoked, the chain is revoked. + return RevocationRevoked + case RevocationUndetermined: + // If we couldn't find the revocation status for a cert, the chain is at best RevocationUndetermined + // keep looking to see if we find a cert in the chain that's RevocationRevoked, + // but return RevocationUndetermined at a minimum. + chainStatus = RevocationUndetermined + case RevocationUnrevoked: + // Continue iterating up the cert chain. + continue + } + } + return chainStatus +} + +func cachedCrl(rawIssuer []byte, cache Cache) (*certificateListExt, bool) { + val, ok := cache.Get(hex.EncodeToString(rawIssuer)) + if !ok { + return nil, false + } + crl, ok := val.(*certificateListExt) + if !ok { + return nil, false + } + // If the CRL is expired, force a reload. + if hasExpired(crl.CertList, time.Now()) { + return nil, false + } + return crl, true +} + +// fetchIssuerCRL fetches and verifies the CRL for rawIssuer from disk or cache if configured in cfg. 
+func fetchIssuerCRL(rawIssuer []byte, crlVerifyCrt []*x509.Certificate, cfg RevocationConfig) (*certificateListExt, error) { + if cfg.Cache != nil { + if crl, ok := cachedCrl(rawIssuer, cfg.Cache); ok { + return crl, nil + } + } + + crl, err := fetchCRL(rawIssuer, cfg) + if err != nil { + return nil, fmt.Errorf("fetchCRL() failed: %v", err) + } + + if err := verifyCRL(crl, rawIssuer, crlVerifyCrt); err != nil { + return nil, fmt.Errorf("verifyCRL() failed: %v", err) + } + if cfg.Cache != nil { + cfg.Cache.Add(hex.EncodeToString(rawIssuer), crl) + } + return crl, nil +} + +// checkCert checks a single certificate against the CRL defined in the certificate. +// It will fetch and verify the CRL(s) defined in the root directory specified by cfg. +// If we can't load any authoritative CRL files, the status is RevocationUndetermined. +// c is the certificate to check. +// crlVerifyCrt is the group of possible certificates to verify the crl. +func checkCert(c *x509.Certificate, crlVerifyCrt []*x509.Certificate, cfg RevocationConfig) RevocationStatus { + crl, err := fetchIssuerCRL(c.RawIssuer, crlVerifyCrt, cfg) + if err != nil { + // We couldn't load any CRL files for the certificate, so we don't know if it's RevocationUnrevoked or not. + grpclogLogger.Warningf("getIssuerCRL(%v) err = %v", c.Issuer, err) + return RevocationUndetermined + } + revocation, err := checkCertRevocation(c, crl) + if err != nil { + grpclogLogger.Warningf("checkCertRevocation(CRL %v) failed: %v", crl.CertList.Issuer, err) + // We couldn't check the CRL file for some reason, so we don't know if it's RevocationUnrevoked or not. + return RevocationUndetermined + } + // Here we've gotten a CRL that loads and verifies. + // We only handle all-reasons CRL files, so this file + // is authoritative for the certificate. 
+ return revocation +} + +func checkCertRevocation(c *x509.Certificate, crl *certificateListExt) (RevocationStatus, error) { + // Per section 5.3.3 we prime the certificate issuer with the CRL issuer. + // Subsequent entries use the previous entry's issuer. + rawEntryIssuer := crl.RawIssuer + + // Loop through all the revoked certificates. + for _, revCert := range crl.CertList.RevokedCertificates { + // 5.3 Loop through CRL entry extensions for needed information. + for _, ext := range revCert.Extensions { + if oidCertificateIssuer.Equal(ext.Id) { + extIssuer, err := parseCertIssuerExt(ext) + if err != nil { + grpclogLogger.Info(err) + if ext.Critical { + return RevocationUndetermined, err + } + // Since this is a non-critical extension, we can skip it even though + // there was a parsing failure. + continue + } + rawEntryIssuer = extIssuer + } else if ext.Critical { + return RevocationUndetermined, fmt.Errorf("checkCertRevocation: Unhandled critical extension: %v", ext.Id) + } + } + + // If the issuer and serial number appear in the CRL, the certificate is revoked. + if bytes.Equal(c.RawIssuer, rawEntryIssuer) && c.SerialNumber.Cmp(revCert.SerialNumber) == 0 { + // CRL contains the serial, so return revoked. + return RevocationRevoked, nil + } + } + // We did not find the serial in the CRL file that was valid for the cert + // so the certificate is not revoked. 
+ return RevocationUnrevoked, nil +} + +func parseCertIssuerExt(ext pkix.Extension) ([]byte, error) { + // 5.3.3 Certificate Issuer + // CertificateIssuer ::= GeneralNames + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + var generalNames []asn1.RawValue + if rest, err := asn1.Unmarshal(ext.Value, &generalNames); err != nil || len(rest) != 0 { + return nil, fmt.Errorf("asn1.Unmarshal failed: %v", err) + } + + for _, generalName := range generalNames { + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + if generalName.Tag == tagDirectoryName { + return generalName.Bytes, nil + } + } + // Conforming CRL issuers MUST include in this extension the + // distinguished name (DN) from the issuer field of the certificate that + // corresponds to this CRL entry. + // If we couldn't get a directoryName, we can't reason about this file so cert status is + // RevocationUndetermined. + return nil, errors.New("no DN found in certificate issuer") +} + +// RFC 5280, 4.2.1.1 +type authKeyID struct { + ID []byte `asn1:"optional,tag:0"` +} + +// RFC5280, 5.2.5 +// id-ce-issuingDistributionPoint OBJECT IDENTIFIER ::= { id-ce 28 } + +// IssuingDistributionPoint ::= SEQUENCE { +// distributionPoint [0] DistributionPointName OPTIONAL, +// onlyContainsUserCerts [1] BOOLEAN DEFAULT FALSE, +// onlyContainsCACerts [2] BOOLEAN DEFAULT FALSE, +// onlySomeReasons [3] ReasonFlags OPTIONAL, +// indirectCRL [4] BOOLEAN DEFAULT FALSE, +// onlyContainsAttributeCerts [5] BOOLEAN DEFAULT FALSE } + +// -- at most one of onlyContainsUserCerts, onlyContainsCACerts, +// -- and onlyContainsAttributeCerts may be set to TRUE. 
+type issuingDistributionPoint struct { + DistributionPoint asn1.RawValue `asn1:"optional,tag:0"` + OnlyContainsUserCerts bool `asn1:"optional,tag:1"` + OnlyContainsCACerts bool `asn1:"optional,tag:2"` + OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"` + IndirectCRL bool `asn1:"optional,tag:4"` + OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"` +} + +// parseCRLExtensions parses the extensions for a CRL +// and checks that they're supported by the parser. +func parseCRLExtensions(c *x509.RevocationList) (*certificateListExt, error) { + if c == nil { + return nil, errors.New("c is nil, expected any value") + } + certList := &certificateListExt{CertList: c} + + for _, ext := range c.Extensions { + switch { + case oidDeltaCRLIndicator.Equal(ext.Id): + return nil, fmt.Errorf("delta CRLs unsupported") + + case oidAuthorityKeyIdentifier.Equal(ext.Id): + var a authKeyID + if rest, err := asn1.Unmarshal(ext.Value, &a); err != nil { + return nil, fmt.Errorf("asn1.Unmarshal failed: %v", err) + } else if len(rest) != 0 { + return nil, errors.New("trailing data after AKID extension") + } + certList.AuthorityKeyID = a.ID + + case oidIssuingDistributionPoint.Equal(ext.Id): + var dp issuingDistributionPoint + if rest, err := asn1.Unmarshal(ext.Value, &dp); err != nil { + return nil, fmt.Errorf("asn1.Unmarshal failed: %v", err) + } else if len(rest) != 0 { + return nil, errors.New("trailing data after IssuingDistributionPoint extension") + } + + if dp.OnlyContainsUserCerts || dp.OnlyContainsCACerts || dp.OnlyContainsAttributeCerts { + return nil, errors.New("CRL only contains some certificate types") + } + if dp.IndirectCRL { + return nil, errors.New("indirect CRLs unsupported") + } + if dp.OnlySomeReasons.BitLength != 0 { + return nil, errors.New("onlySomeReasons unsupported") + } + + case ext.Critical: + return nil, fmt.Errorf("unsupported critical extension: %v", ext.Id) + } + } + + if len(certList.AuthorityKeyID) == 0 { + return nil, errors.New("authority key 
identifier extension missing") + } + return certList, nil +} + +func fetchCRL(rawIssuer []byte, cfg RevocationConfig) (*certificateListExt, error) { + var parsedCRL *certificateListExt + // 6.3.3 (a) (1) (ii) + // According to X509_LOOKUP_hash_dir the format is issuer_hash.rN where N is an increasing number. + // There are no gaps, so we break when we can't find a file. + for i := 0; ; i++ { + // Unmarshal to RDNSeqence according to http://go/godoc/crypto/x509/pkix/#Name. + var r pkix.RDNSequence + rest, err := asn1.Unmarshal(rawIssuer, &r) + if len(rest) != 0 || err != nil { + return nil, fmt.Errorf("asn1.Unmarshal(Issuer) len(rest) = %d failed: %v", len(rest), err) + } + crlPath := fmt.Sprintf("%s.r%d", filepath.Join(cfg.RootDir, x509NameHash(r)), i) + crlBytes, err := os.ReadFile(crlPath) + if err != nil { + // Break when we can't read a CRL file. + grpclogLogger.Infof("readFile: %v", err) + break + } + + crl, err := parseRevocationList(crlBytes) + if err != nil { + // Parsing errors for a CRL shouldn't happen so fail. + return nil, fmt.Errorf("parseRevocationList(%v) failed: %v", crlPath, err) + } + var certList *certificateListExt + if certList, err = parseCRLExtensions(crl); err != nil { + grpclogLogger.Infof("fetchCRL: unsupported crl %v: %v", crlPath, err) + // Continue to find a supported CRL + continue + } + + rawCRLIssuer, err := extractCRLIssuer(crlBytes) + if err != nil { + return nil, err + } + certList.RawIssuer = rawCRLIssuer + // RFC5280, 6.3.3 (b) Verify the issuer and scope of the complete CRL. + if bytes.Equal(rawIssuer, rawCRLIssuer) { + parsedCRL = certList + // Continue to find the highest number in the .rN suffix. 
+ continue + } + } + + if parsedCRL == nil { + return nil, fmt.Errorf("fetchCrls no CRLs found for issuer") + } + return parsedCRL, nil +} + +func verifyCRL(crl *certificateListExt, rawIssuer []byte, chain []*x509.Certificate) error { + // RFC5280, 6.3.3 (f) Obtain and validateate the certification path for the issuer of the complete CRL + // We intentionally limit our CRLs to be signed with the same certificate path as the certificate + // so we can use the chain from the connection. + + for _, c := range chain { + // Use the key where the subject and KIDs match. + // This departs from RFC4158, 3.5.12 which states that KIDs + // cannot eliminate certificates, but RFC5280, 5.2.1 states that + // "Conforming CRL issuers MUST use the key identifier method, and MUST + // include this extension in all CRLs issued." + // So, this is much simpler than RFC4158 and should be compatible. + if bytes.Equal(c.SubjectKeyId, crl.AuthorityKeyID) && bytes.Equal(c.RawSubject, crl.RawIssuer) { + // RFC5280, 6.3.3 (g) Validate signature. + return crl.CertList.CheckSignatureFrom(c) + } + } + return fmt.Errorf("verifyCRL: No certificates mached CRL issuer (%v)", crl.CertList.Issuer) +} + +// pemType is the type of a PEM encoded CRL. +const pemType string = "X509 CRL" + +var crlPemPrefix = []byte("-----BEGIN X509 CRL") + +func crlPemToDer(crlBytes []byte) []byte { + block, _ := pem.Decode(crlBytes) + if block != nil && block.Type == pemType { + crlBytes = block.Bytes + } + return crlBytes +} + +// extractCRLIssuer extracts the raw ASN.1 encoding of the CRL issuer. Due to the design of +// pkix.CertificateList and pkix.RDNSequence, it is not possible to reliably marshal the +// parsed Issuer to it's original raw encoding. 
+func extractCRLIssuer(crlBytes []byte) ([]byte, error) { + if bytes.HasPrefix(crlBytes, crlPemPrefix) { + crlBytes = crlPemToDer(crlBytes) + } + der := cryptobyte.String(crlBytes) + var issuer cryptobyte.String + if !der.ReadASN1(&der, cbasn1.SEQUENCE) || + !der.ReadASN1(&der, cbasn1.SEQUENCE) || + !der.SkipOptionalASN1(cbasn1.INTEGER) || + !der.SkipASN1(cbasn1.SEQUENCE) || + !der.ReadASN1Element(&issuer, cbasn1.SEQUENCE) { + return nil, errors.New("extractCRLIssuer: invalid ASN.1 encoding") + } + return issuer, nil +} + +func hasExpired(crl *x509.RevocationList, now time.Time) bool { + return !now.Before(crl.NextUpdate) +} + +// parseRevocationList comes largely from here +// x509.go: +// https://github.com/golang/go/blob/e2f413402527505144beea443078649380e0c545/src/crypto/x509/x509.go#L1669-L1690 +// We must first convert PEM to DER to be able to use the new +// x509.ParseRevocationList instead of the deprecated x509.ParseCRL +func parseRevocationList(crlBytes []byte) (*x509.RevocationList, error) { + if bytes.HasPrefix(crlBytes, crlPemPrefix) { + crlBytes = crlPemToDer(crlBytes) + } + crl, err := x509.ParseRevocationList(crlBytes) + if err != nil { + return nil, err + } + return crl, nil +} diff --git a/security/advancedtls/crl_deprecated.go b/security/advancedtls/crl_deprecated.go new file mode 100644 index 000000000000..a54a2f6e55c5 --- /dev/null +++ b/security/advancedtls/crl_deprecated.go @@ -0,0 +1,521 @@ +// TODO(@gregorycooke) - Remove file when only golang 1.19+ is supported +//go:build !go1.19 + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package advancedtls + +import ( + "bytes" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/crypto/cryptobyte" + cbasn1 "golang.org/x/crypto/cryptobyte/asn1" + "google.golang.org/grpc/grpclog" +) + +var grpclogLogger = grpclog.Component("advancedtls") + +// Cache is an interface to cache CRL files. +// The cache implementation must be concurrency safe. +// A fixed size lru cache from golang-lru is recommended. +type Cache interface { + // Add adds a value to the cache. + Add(key, value interface{}) bool + // Get looks up a key's value from the cache. + Get(key interface{}) (value interface{}, ok bool) +} + +// RevocationConfig contains options for CRL lookup. +type RevocationConfig struct { + // RootDir is the directory to search for CRL files. + // Directory format must match OpenSSL X509_LOOKUP_hash_dir(3). + RootDir string + // AllowUndetermined controls if certificate chains with RevocationUndetermined + // revocation status are allowed to complete. + AllowUndetermined bool + // Cache will store CRL files if not nil, otherwise files are reloaded for every lookup. + Cache Cache +} + +// RevocationStatus is the revocation status for a certificate or chain. +type RevocationStatus int + +const ( + // RevocationUndetermined means we couldn't find or verify a CRL for the cert. 
+ RevocationUndetermined RevocationStatus = iota + // RevocationUnrevoked means we found the CRL for the cert and the cert is not revoked. + RevocationUnrevoked + // RevocationRevoked means we found the CRL and the cert is revoked. + RevocationRevoked +) + +func (s RevocationStatus) String() string { + return [...]string{"RevocationUndetermined", "RevocationUnrevoked", "RevocationRevoked"}[s] +} + +// certificateListExt contains a pkix.CertificateList and parsed +// extensions that aren't provided by the golang CRL parser. +type certificateListExt struct { + CertList *pkix.CertificateList + // RFC5280, 5.2.1, all conforming CRLs must have a AKID with the ID method. + AuthorityKeyID []byte + RawIssuer []byte +} + +const tagDirectoryName = 4 + +var ( + // RFC5280, 5.2.4 id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } + oidDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27} + // RFC5280, 5.2.5 id-ce-issuingDistributionPoint OBJECT IDENTIFIER ::= { id-ce 28 } + oidIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28} + // RFC5280, 5.3.3 id-ce-certificateIssuer OBJECT IDENTIFIER ::= { id-ce 29 } + oidCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29} + // RFC5290, 4.2.1.1 id-ce-authorityKeyIdentifier OBJECT IDENTIFIER ::= { id-ce 35 } + oidAuthorityKeyIdentifier = asn1.ObjectIdentifier{2, 5, 29, 35} +) + +// x509NameHash implements the OpenSSL X509_NAME_hash function for hashed directory lookups. +// +// NOTE: due to the behavior of asn1.Marshal, if the original encoding of the RDN sequence +// contains strings which do not use the ASN.1 PrintableString type, the name will not be +// re-encoded using those types, resulting in a hash which does not match that produced +// by OpenSSL. +func x509NameHash(r pkix.RDNSequence) string { + var canonBytes []byte + // First, canonicalize all the strings. 
+ for _, rdnSet := range r { + for i, rdn := range rdnSet { + value, ok := rdn.Value.(string) + if !ok { + continue + } + // OpenSSL trims all whitespace, does a tolower, and removes extra spaces between words. + // Implemented in x509_name_canon in OpenSSL + canonStr := strings.Join(strings.Fields( + strings.TrimSpace(strings.ToLower(value))), " ") + // Then it changes everything to UTF8 strings + rdnSet[i].Value = asn1.RawValue{Tag: asn1.TagUTF8String, Bytes: []byte(canonStr)} + + } + } + + // Finally, OpenSSL drops the initial sequence tag + // so we marshal all the RDNs separately instead of as a group. + for _, canonRdn := range r { + b, err := asn1.Marshal(canonRdn) + if err != nil { + continue + } + canonBytes = append(canonBytes, b...) + } + + issuerHash := sha1.Sum(canonBytes) + // Openssl takes the first 4 bytes and encodes them as a little endian + // uint32 and then uses the hex to make the file name. + // In C++, this would be: + // (((unsigned long)md[0]) | ((unsigned long)md[1] << 8L) | + // ((unsigned long)md[2] << 16L) | ((unsigned long)md[3] << 24L) + // ) & 0xffffffffL; + fileHash := binary.LittleEndian.Uint32(issuerHash[0:4]) + return fmt.Sprintf("%08x", fileHash) +} + +// CheckRevocation checks the connection for revoked certificates based on RFC5280. +// This implementation has the following major limitations: +// - Indirect CRL files are not supported. +// - CRL loading is only supported from directories in the X509_LOOKUP_hash_dir format. +// - OnlySomeReasons is not supported. +// - Delta CRL files are not supported. +// - Certificate CRLDistributionPoint must be URLs, but are then ignored and converted into a file path. +// - CRL checks are done after path building, which goes against RFC4158. +func CheckRevocation(conn tls.ConnectionState, cfg RevocationConfig) error { + return CheckChainRevocation(conn.VerifiedChains, cfg) +} + +// CheckChainRevocation checks the verified certificate chain +// for revoked certificates based on RFC5280. 
+func CheckChainRevocation(verifiedChains [][]*x509.Certificate, cfg RevocationConfig) error { + // Iterate the verified chains looking for one that is RevocationUnrevoked. + // A single RevocationUnrevoked chain is enough to allow the connection, and a single RevocationRevoked + // chain does not mean the connection should fail. + count := make(map[RevocationStatus]int) + for _, chain := range verifiedChains { + switch checkChain(chain, cfg) { + case RevocationUnrevoked: + // If any chain is RevocationUnrevoked then return no error. + return nil + case RevocationRevoked: + // If this chain is revoked, keep looking for another chain. + count[RevocationRevoked]++ + continue + case RevocationUndetermined: + if cfg.AllowUndetermined { + return nil + } + count[RevocationUndetermined]++ + continue + } + } + return fmt.Errorf("no unrevoked chains found: %v", count) +} + +// checkChain will determine and check all certificates in chain against the CRL +// defined in the certificate with the following rules: +// 1. If any certificate is RevocationRevoked, return RevocationRevoked. +// 2. If any certificate is RevocationUndetermined, return RevocationUndetermined. +// 3. If all certificates are RevocationUnrevoked, return RevocationUnrevoked. +func checkChain(chain []*x509.Certificate, cfg RevocationConfig) RevocationStatus { + chainStatus := RevocationUnrevoked + for _, c := range chain { + switch checkCert(c, chain, cfg) { + case RevocationRevoked: + // Easy case, if a cert in the chain is revoked, the chain is revoked. + return RevocationRevoked + case RevocationUndetermined: + // If we couldn't find the revocation status for a cert, the chain is at best RevocationUndetermined + // keep looking to see if we find a cert in the chain that's RevocationRevoked, + // but return RevocationUndetermined at a minimum. + chainStatus = RevocationUndetermined + case RevocationUnrevoked: + // Continue iterating up the cert chain. 
+ continue + } + } + return chainStatus +} + +func cachedCrl(rawIssuer []byte, cache Cache) (*certificateListExt, bool) { + val, ok := cache.Get(hex.EncodeToString(rawIssuer)) + if !ok { + return nil, false + } + crl, ok := val.(*certificateListExt) + if !ok { + return nil, false + } + // If the CRL is expired, force a reload. + if crl.CertList.HasExpired(time.Now()) { + return nil, false + } + return crl, true +} + +// fetchIssuerCRL fetches and verifies the CRL for rawIssuer from disk or cache if configured in cfg. +func fetchIssuerCRL(rawIssuer []byte, crlVerifyCrt []*x509.Certificate, cfg RevocationConfig) (*certificateListExt, error) { + if cfg.Cache != nil { + if crl, ok := cachedCrl(rawIssuer, cfg.Cache); ok { + return crl, nil + } + } + + crl, err := fetchCRL(rawIssuer, cfg) + if err != nil { + return nil, fmt.Errorf("fetchCRL() failed: %v", err) + } + + if err := verifyCRL(crl, rawIssuer, crlVerifyCrt); err != nil { + return nil, fmt.Errorf("verifyCRL() failed: %v", err) + } + if cfg.Cache != nil { + cfg.Cache.Add(hex.EncodeToString(rawIssuer), crl) + } + return crl, nil +} + +// checkCert checks a single certificate against the CRL defined in the certificate. +// It will fetch and verify the CRL(s) defined in the root directory specified by cfg. +// If we can't load any authoritative CRL files, the status is RevocationUndetermined. +// c is the certificate to check. +// crlVerifyCrt is the group of possible certificates to verify the crl. +func checkCert(c *x509.Certificate, crlVerifyCrt []*x509.Certificate, cfg RevocationConfig) RevocationStatus { + crl, err := fetchIssuerCRL(c.RawIssuer, crlVerifyCrt, cfg) + if err != nil { + // We couldn't load any CRL files for the certificate, so we don't know if it's RevocationUnrevoked or not. 
+ grpclogLogger.Warningf("getIssuerCRL(%v) err = %v", c.Issuer, err) + return RevocationUndetermined + } + revocation, err := checkCertRevocation(c, crl) + if err != nil { + grpclogLogger.Warningf("checkCertRevocation(CRL %v) failed: %v", crl.CertList.TBSCertList.Issuer, err) + // We couldn't check the CRL file for some reason, so we don't know if it's RevocationUnrevoked or not. + return RevocationUndetermined + } + // Here we've gotten a CRL that loads and verifies. + // We only handle all-reasons CRL files, so this file + // is authoritative for the certificate. + return revocation +} + +func checkCertRevocation(c *x509.Certificate, crl *certificateListExt) (RevocationStatus, error) { + // Per section 5.3.3 we prime the certificate issuer with the CRL issuer. + // Subsequent entries use the previous entry's issuer. + rawEntryIssuer := crl.RawIssuer + + // Loop through all the revoked certificates. + for _, revCert := range crl.CertList.TBSCertList.RevokedCertificates { + // 5.3 Loop through CRL entry extensions for needed information. + for _, ext := range revCert.Extensions { + if oidCertificateIssuer.Equal(ext.Id) { + extIssuer, err := parseCertIssuerExt(ext) + if err != nil { + grpclogLogger.Info(err) + if ext.Critical { + return RevocationUndetermined, err + } + // Since this is a non-critical extension, we can skip it even though + // there was a parsing failure. + continue + } + rawEntryIssuer = extIssuer + } else if ext.Critical { + return RevocationUndetermined, fmt.Errorf("checkCertRevocation: Unhandled critical extension: %v", ext.Id) + } + } + + // If the issuer and serial number appear in the CRL, the certificate is revoked. + if bytes.Equal(c.RawIssuer, rawEntryIssuer) && c.SerialNumber.Cmp(revCert.SerialNumber) == 0 { + // CRL contains the serial, so return revoked. + return RevocationRevoked, nil + } + } + // We did not find the serial in the CRL file that was valid for the cert + // so the certificate is not revoked. 
+ return RevocationUnrevoked, nil +} + +func parseCertIssuerExt(ext pkix.Extension) ([]byte, error) { + // 5.3.3 Certificate Issuer + // CertificateIssuer ::= GeneralNames + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + var generalNames []asn1.RawValue + if rest, err := asn1.Unmarshal(ext.Value, &generalNames); err != nil || len(rest) != 0 { + return nil, fmt.Errorf("asn1.Unmarshal failed: %v", err) + } + + for _, generalName := range generalNames { + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + if generalName.Tag == tagDirectoryName { + return generalName.Bytes, nil + } + } + // Conforming CRL issuers MUST include in this extension the + // distinguished name (DN) from the issuer field of the certificate that + // corresponds to this CRL entry. + // If we couldn't get a directoryName, we can't reason about this file so cert status is + // RevocationUndetermined. + return nil, errors.New("no DN found in certificate issuer") +} + +// RFC 5280, 4.2.1.1 +type authKeyID struct { + ID []byte `asn1:"optional,tag:0"` +} + +// RFC5280, 5.2.5 +// id-ce-issuingDistributionPoint OBJECT IDENTIFIER ::= { id-ce 28 } + +// IssuingDistributionPoint ::= SEQUENCE { +// distributionPoint [0] DistributionPointName OPTIONAL, +// onlyContainsUserCerts [1] BOOLEAN DEFAULT FALSE, +// onlyContainsCACerts [2] BOOLEAN DEFAULT FALSE, +// onlySomeReasons [3] ReasonFlags OPTIONAL, +// indirectCRL [4] BOOLEAN DEFAULT FALSE, +// onlyContainsAttributeCerts [5] BOOLEAN DEFAULT FALSE } + +// -- at most one of onlyContainsUserCerts, onlyContainsCACerts, +// -- and onlyContainsAttributeCerts may be set to TRUE. 
+type issuingDistributionPoint struct { + DistributionPoint asn1.RawValue `asn1:"optional,tag:0"` + OnlyContainsUserCerts bool `asn1:"optional,tag:1"` + OnlyContainsCACerts bool `asn1:"optional,tag:2"` + OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"` + IndirectCRL bool `asn1:"optional,tag:4"` + OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"` +} + +// parseCRLExtensions parses the extensions for a CRL +// and checks that they're supported by the parser. +func parseCRLExtensions(c *pkix.CertificateList) (*certificateListExt, error) { + if c == nil { + return nil, errors.New("c is nil, expected any value") + } + certList := &certificateListExt{CertList: c} + + for _, ext := range c.TBSCertList.Extensions { + switch { + case oidDeltaCRLIndicator.Equal(ext.Id): + return nil, fmt.Errorf("delta CRLs unsupported") + + case oidAuthorityKeyIdentifier.Equal(ext.Id): + var a authKeyID + if rest, err := asn1.Unmarshal(ext.Value, &a); err != nil { + return nil, fmt.Errorf("asn1.Unmarshal failed: %v", err) + } else if len(rest) != 0 { + return nil, errors.New("trailing data after AKID extension") + } + certList.AuthorityKeyID = a.ID + + case oidIssuingDistributionPoint.Equal(ext.Id): + var dp issuingDistributionPoint + if rest, err := asn1.Unmarshal(ext.Value, &dp); err != nil { + return nil, fmt.Errorf("asn1.Unmarshal failed: %v", err) + } else if len(rest) != 0 { + return nil, errors.New("trailing data after IssuingDistributionPoint extension") + } + + if dp.OnlyContainsUserCerts || dp.OnlyContainsCACerts || dp.OnlyContainsAttributeCerts { + return nil, errors.New("CRL only contains some certificate types") + } + if dp.IndirectCRL { + return nil, errors.New("indirect CRLs unsupported") + } + if dp.OnlySomeReasons.BitLength != 0 { + return nil, errors.New("onlySomeReasons unsupported") + } + + case ext.Critical: + return nil, fmt.Errorf("unsupported critical extension: %v", ext.Id) + } + } + + if len(certList.AuthorityKeyID) == 0 { + return nil, 
errors.New("authority key identifier extension missing") + } + return certList, nil +} + +func fetchCRL(rawIssuer []byte, cfg RevocationConfig) (*certificateListExt, error) { + var parsedCRL *certificateListExt + // 6.3.3 (a) (1) (ii) + // According to X509_LOOKUP_hash_dir the format is issuer_hash.rN where N is an increasing number. + // There are no gaps, so we break when we can't find a file. + for i := 0; ; i++ { + // Unmarshal to RDNSeqence according to http://go/godoc/crypto/x509/pkix/#Name. + var r pkix.RDNSequence + rest, err := asn1.Unmarshal(rawIssuer, &r) + if len(rest) != 0 || err != nil { + return nil, fmt.Errorf("asn1.Unmarshal(Issuer) len(rest) = %d failed: %v", len(rest), err) + } + crlPath := fmt.Sprintf("%s.r%d", filepath.Join(cfg.RootDir, x509NameHash(r)), i) + crlBytes, err := os.ReadFile(crlPath) + if err != nil { + // Break when we can't read a CRL file. + grpclogLogger.Infof("readFile: %v", err) + break + } + + crl, err := x509.ParseCRL(crlBytes) + if err != nil { + // Parsing errors for a CRL shouldn't happen so fail. + return nil, fmt.Errorf("x509.ParseCrl(%v) failed: %v", crlPath, err) + } + var certList *certificateListExt + if certList, err = parseCRLExtensions(crl); err != nil { + grpclogLogger.Infof("fetchCRL: unsupported crl %v: %v", crlPath, err) + // Continue to find a supported CRL + continue + } + + rawCRLIssuer, err := extractCRLIssuer(crlBytes) + if err != nil { + return nil, err + } + certList.RawIssuer = rawCRLIssuer + // RFC5280, 6.3.3 (b) Verify the issuer and scope of the complete CRL. + if bytes.Equal(rawIssuer, rawCRLIssuer) { + parsedCRL = certList + // Continue to find the highest number in the .rN suffix. 
+ continue + } + } + + if parsedCRL == nil { + return nil, fmt.Errorf("fetchCrls no CRLs found for issuer") + } + return parsedCRL, nil +} + +func verifyCRL(crl *certificateListExt, rawIssuer []byte, chain []*x509.Certificate) error { + // RFC5280, 6.3.3 (f) Obtain and validateate the certification path for the issuer of the complete CRL + // We intentionally limit our CRLs to be signed with the same certificate path as the certificate + // so we can use the chain from the connection. + + for _, c := range chain { + // Use the key where the subject and KIDs match. + // This departs from RFC4158, 3.5.12 which states that KIDs + // cannot eliminate certificates, but RFC5280, 5.2.1 states that + // "Conforming CRL issuers MUST use the key identifier method, and MUST + // include this extension in all CRLs issued." + // So, this is much simpler than RFC4158 and should be compatible. + if bytes.Equal(c.SubjectKeyId, crl.AuthorityKeyID) && bytes.Equal(c.RawSubject, crl.RawIssuer) { + // RFC5280, 6.3.3 (g) Validate signature. + return c.CheckCRLSignature(crl.CertList) + } + } + return fmt.Errorf("verifyCRL: No certificates mached CRL issuer (%v)", crl.CertList.TBSCertList.Issuer) +} + +var crlPemPrefix = []byte("-----BEGIN X509 CRL") + +// extractCRLIssuer extracts the raw ASN.1 encoding of the CRL issuer. Due to the design of +// pkix.CertificateList and pkix.RDNSequence, it is not possible to reliably marshal the +// parsed Issuer to it's original raw encoding. 
+func extractCRLIssuer(crlBytes []byte) ([]byte, error) { + if bytes.HasPrefix(crlBytes, crlPemPrefix) { + block, _ := pem.Decode(crlBytes) + if block != nil && block.Type == "X509 CRL" { + crlBytes = block.Bytes + } + } + + der := cryptobyte.String(crlBytes) + var issuer cryptobyte.String + if !der.ReadASN1(&der, cbasn1.SEQUENCE) || + !der.ReadASN1(&der, cbasn1.SEQUENCE) || + !der.SkipOptionalASN1(cbasn1.INTEGER) || + !der.SkipASN1(cbasn1.SEQUENCE) || + !der.ReadASN1Element(&issuer, cbasn1.SEQUENCE) { + return nil, errors.New("extractCRLIssuer: invalid ASN.1 encoding") + } + return issuer, nil +} diff --git a/security/advancedtls/crl_deprecated_test.go b/security/advancedtls/crl_deprecated_test.go new file mode 100644 index 000000000000..f51ab95d00ce --- /dev/null +++ b/security/advancedtls/crl_deprecated_test.go @@ -0,0 +1,775 @@ +// TODO(@gregorycooke) - Remove file when only golang 1.19+ is supported +//go:build !go1.19 + +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package advancedtls + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "fmt" + "math/big" + "net" + "os" + "path" + "strings" + "testing" + "time" + + lru "github.com/hashicorp/golang-lru" + "google.golang.org/grpc/security/advancedtls/testdata" +) + +func TestX509NameHash(t *testing.T) { + nameTests := []struct { + in pkix.Name + out string + }{ + { + in: pkix.Name{ + Country: []string{"US"}, + Organization: []string{"Example"}, + }, + out: "9cdd41ff", + }, + { + in: pkix.Name{ + Country: []string{"us"}, + Organization: []string{"example"}, + }, + out: "9cdd41ff", + }, + { + in: pkix.Name{ + Country: []string{" us"}, + Organization: []string{"example"}, + }, + out: "9cdd41ff", + }, + { + in: pkix.Name{ + Country: []string{"US"}, + Province: []string{"California"}, + Locality: []string{"Mountain View"}, + Organization: []string{"BoringSSL"}, + }, + out: "c24414d9", + }, + { + in: pkix.Name{ + Country: []string{"US"}, + Province: []string{"California"}, + Locality: []string{"Mountain View"}, + Organization: []string{"BoringSSL"}, + }, + out: "c24414d9", + }, + { + in: pkix.Name{ + SerialNumber: "87f4514475ba0a2b", + }, + out: "9dc713cd", + }, + { + in: pkix.Name{ + Country: []string{"US"}, + Province: []string{"California"}, + Locality: []string{"Mountain View"}, + Organization: []string{"Google LLC"}, + OrganizationalUnit: []string{"Production", "campus-sln"}, + CommonName: "Root CA (2021-02-02T07:30:36-08:00)", + }, + out: "0b35a562", + }, + { + in: pkix.Name{ + ExtraNames: []pkix.AttributeTypeAndValue{ + {Type: asn1.ObjectIdentifier{5, 5, 5, 5}, Value: "aaaa"}, + }, + }, + out: "eea339da", + }, + } + for _, tt := range nameTests { + t.Run(tt.in.String(), func(t *testing.T) { + h := x509NameHash(tt.in.ToRDNSequence()) + if h != tt.out { + t.Errorf("x509NameHash(%v): Got %v wanted %v", tt.in, h, tt.out) + } + }) + } +} + 
+func TestUnsupportedCRLs(t *testing.T) { + crlBytesSomeReasons := []byte(`-----BEGIN X509 CRL----- +MIIEeDCCA2ACAQEwDQYJKoZIhvcNAQELBQAwQjELMAkGA1UEBhMCVVMxHjAcBgNV +BAoTFUdvb2dsZSBUcnVzdCBTZXJ2aWNlczETMBEGA1UEAxMKR1RTIENBIDFPMRcN +MjEwNDI2MTI1OTQxWhcNMjEwNTA2MTE1OTQwWjCCAn0wIgIRAPOOG3L4VLC7CAAA +AABxQgEXDTIxMDQxOTEyMTgxOFowIQIQUK0UwBZkVdQIAAAAAHFCBRcNMjEwNDE5 +MTIxODE4WjAhAhBRIXBJaKoQkQgAAAAAcULHFw0yMTA0MjAxMjE4MTdaMCICEQCv +qQWUq5UxmQgAAAAAcULMFw0yMTA0MjAxMjE4MTdaMCICEQDdv5k1kKwKTQgAAAAA +cUOQFw0yMTA0MjExMjE4MTZaMCICEQDGIEfR8N9sEAgAAAAAcUOWFw0yMTA0MjEx +MjE4MThaMCECEBHgbLXlj5yUCAAAAABxQ/IXDTIxMDQyMTIzMDAyNlowIQIQE1wT +2GGYqKwIAAAAAHFD7xcNMjEwNDIxMjMwMDI5WjAiAhEAo/bSyDjpVtsIAAAAAHFE +txcNMjEwNDIyMjMwMDI3WjAhAhARdCrSrHE0dAgAAAAAcUS/Fw0yMTA0MjIyMzAw +MjhaMCECEHONohfWn3wwCAAAAABxRX8XDTIxMDQyMzIzMDAyOVowIgIRAOYkiUPA +os4vCAAAAABxRYgXDTIxMDQyMzIzMDAyOFowIQIQRNTow5Eg2gEIAAAAAHFGShcN +MjEwNDI0MjMwMDI2WjAhAhBX32dH4/WQ6AgAAAAAcUZNFw0yMTA0MjQyMzAwMjZa +MCICEQDHnUM1vsaP/wgAAAAAcUcQFw0yMTA0MjUyMzAwMjZaMCECEEm5rvmL8sj6 +CAAAAABxRxQXDTIxMDQyNTIzMDAyN1owIQIQW16OQs4YQYkIAAAAAHFIABcNMjEw +NDI2MTI1NDA4WjAhAhAhSohpYsJtDQgAAAAAcUgEFw0yMTA0MjYxMjU0MDlaoGkw +ZzAfBgNVHSMEGDAWgBSY0fhuEOvPm+xgnxiQG6DrfQn9KzALBgNVHRQEBAICBngw +NwYDVR0cAQH/BC0wK6AmoCSGImh0dHA6Ly9jcmwucGtpLmdvb2cvR1RTMU8xY29y +ZS5jcmyBAf8wDQYJKoZIhvcNAQELBQADggEBADPBXbxVxMJ1HC7btXExRUpJHUlU +YbeCZGx6zj5F8pkopbmpV7cpewwhm848Fx4VaFFppZQZd92O08daEC6aEqoug4qF +z6ZrOLzhuKfpW8E93JjgL91v0FYN7iOcT7+ERKCwVEwEkuxszxs7ggW6OJYJNvHh +priIdmcPoiQ3ZrIRH0vE3BfUcNXnKFGATWuDkiRI0I4A5P7NiOf+lAuGZet3/eom +0chgts6sdau10GfeUpHUd4f8e93cS/QeLeG16z7LC8vRLstU3m3vrknpZbdGqSia +97w66mqcnQh9V0swZiEnVLmLufaiuDZJ+6nUzSvLqBlb/ei3T/tKV0BoKJA= +-----END X509 CRL-----`) + + crlBytesIndirect := []byte(`-----BEGIN X509 CRL----- +MIIDGjCCAgICAQEwDQYJKoZIhvcNAQELBQAwdjELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCkNhbGlmb3JuaWExFDASBgNVBAoTC1Rlc3RpbmcgTHRkMSowKAYDVQQLEyFU +ZXN0aW5nIEx0ZCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxEDAOBgNVBAMTB1Rlc3Qg 
+Q0EXDTIxMDExNjAyMjAxNloXDTIxMDEyMDA2MjAxNlowgfIwbAIBAhcNMjEwMTE2 +MDIyMDE2WjBYMAoGA1UdFQQDCgEEMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQG +EwNVU0ExDTALBgNVBAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0 +MTAgAgEDFw0yMTAxMTYwMjIwMTZaMAwwCgYDVR0VBAMKAQEwYAIBBBcNMjEwMTE2 +MDIyMDE2WjBMMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQGEwNVU0ExDTALBgNV +BAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0MqBjMGEwHwYDVR0j +BBgwFoAURJSDWAOfhGCryBjl8dsQjBitl3swCgYDVR0UBAMCAQEwMgYDVR0cAQH/ +BCgwJqAhoB+GHWh0dHA6Ly9jcmxzLnBraS5nb29nL3Rlc3QuY3JshAH/MA0GCSqG +SIb3DQEBCwUAA4IBAQBVXX67mr2wFPmEWCe6mf/wFnPl3xL6zNOl96YJtsd7ulcS +TEbdJpaUnWFQ23+Tpzdj/lI2aQhTg5Lvii3o+D8C5r/Jc5NhSOtVJJDI/IQLh4pG +NgGdljdbJQIT5D2Z71dgbq1ocxn8DefZIJjO3jp8VnAm7AIMX2tLTySzD2MpMeMq +XmcN4lG1e4nx+xjzp7MySYO42NRY3LkphVzJhu3dRBYhBKViRJxw9hLttChitJpF +6Kh6a0QzrEY/QDJGhE1VrAD2c5g/SKnHPDVoCWo4ACIICi76KQQSIWfIdp4W/SY3 +qsSIp8gfxSyzkJP+Ngkm2DdLjlJQCZ9R0MZP9Xj4 +-----END X509 CRL-----`) + + var tests = []struct { + desc string + in []byte + }{ + { + desc: "some reasons", + in: crlBytesSomeReasons, + }, + { + desc: "indirect", + in: crlBytesIndirect, + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + crl, err := x509.ParseCRL(tt.in) + if err != nil { + t.Fatal(err) + } + if _, err := parseCRLExtensions(crl); err == nil { + t.Error("expected error got ok") + } + }) + } +} + +func TestCheckCertRevocation(t *testing.T) { + dummyCrlFile := []byte(`-----BEGIN X509 CRL----- +MIIDGjCCAgICAQEwDQYJKoZIhvcNAQELBQAwdjELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCkNhbGlmb3JuaWExFDASBgNVBAoTC1Rlc3RpbmcgTHRkMSowKAYDVQQLEyFU +ZXN0aW5nIEx0ZCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxEDAOBgNVBAMTB1Rlc3Qg +Q0EXDTIxMDExNjAyMjAxNloXDTIxMDEyMDA2MjAxNlowgfIwbAIBAhcNMjEwMTE2 +MDIyMDE2WjBYMAoGA1UdFQQDCgEEMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQG +EwNVU0ExDTALBgNVBAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0 +MTAgAgEDFw0yMTAxMTYwMjIwMTZaMAwwCgYDVR0VBAMKAQEwYAIBBBcNMjEwMTE2 +MDIyMDE2WjBMMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQGEwNVU0ExDTALBgNV 
+BAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0MqBjMGEwHwYDVR0j +BBgwFoAURJSDWAOfhGCryBjl8dsQjBitl3swCgYDVR0UBAMCAQEwMgYDVR0cAQH/ +BCgwJqAhoB+GHWh0dHA6Ly9jcmxzLnBraS5nb29nL3Rlc3QuY3JshAH/MA0GCSqG +SIb3DQEBCwUAA4IBAQBVXX67mr2wFPmEWCe6mf/wFnPl3xL6zNOl96YJtsd7ulcS +TEbdJpaUnWFQ23+Tpzdj/lI2aQhTg5Lvii3o+D8C5r/Jc5NhSOtVJJDI/IQLh4pG +NgGdljdbJQIT5D2Z71dgbq1ocxn8DefZIJjO3jp8VnAm7AIMX2tLTySzD2MpMeMq +XmcN4lG1e4nx+xjzp7MySYO42NRY3LkphVzJhu3dRBYhBKViRJxw9hLttChitJpF +6Kh6a0QzrEY/QDJGhE1VrAD2c5g/SKnHPDVoCWo4ACIICi76KQQSIWfIdp4W/SY3 +qsSIp8gfxSyzkJP+Ngkm2DdLjlJQCZ9R0MZP9Xj4 +-----END X509 CRL-----`) + crl, err := x509.ParseCRL(dummyCrlFile) + if err != nil { + t.Fatalf("x509.ParseCRL(dummyCrlFile) failed: %v", err) + } + crlExt := &certificateListExt{CertList: crl} + var crlIssuer pkix.Name + crlIssuer.FillFromRDNSequence(&crl.TBSCertList.Issuer) + + var revocationTests = []struct { + desc string + in x509.Certificate + revoked RevocationStatus + }{ + { + desc: "Single revoked", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test1", + }, + SerialNumber: big.NewInt(2), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationRevoked, + }, + { + desc: "Revoked no entry issuer", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test1", + }, + SerialNumber: big.NewInt(3), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationRevoked, + }, + { + desc: "Revoked new entry issuer", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test2", + }, + SerialNumber: big.NewInt(4), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationRevoked, + }, + { + desc: "Single unrevoked", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + 
Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test2", + }, + SerialNumber: big.NewInt(1), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationUnrevoked, + }, + { + desc: "Single unrevoked Issuer", + in: x509.Certificate{ + Issuer: crlIssuer, + SerialNumber: big.NewInt(2), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationUnrevoked, + }, + } + + for _, tt := range revocationTests { + rawIssuer, err := asn1.Marshal(tt.in.Issuer.ToRDNSequence()) + if err != nil { + t.Fatalf("asn1.Marshal(%v) failed: %v", tt.in.Issuer.ToRDNSequence(), err) + } + tt.in.RawIssuer = rawIssuer + t.Run(tt.desc, func(t *testing.T) { + rev, err := checkCertRevocation(&tt.in, crlExt) + if err != nil { + t.Errorf("checkCertRevocation(%v) err = %v", tt.in.Issuer, err) + } else if rev != tt.revoked { + t.Errorf("checkCertRevocation(%v(%v)) returned %v wanted %v", + tt.in.Issuer, tt.in.SerialNumber, rev, tt.revoked) + } + }) + } +} + +func makeChain(t *testing.T, name string) []*x509.Certificate { + t.Helper() + + certChain := make([]*x509.Certificate, 0) + + rest, err := os.ReadFile(name) + if err != nil { + t.Fatalf("os.ReadFile(%v) failed %v", name, err) + } + for len(rest) > 0 { + var block *pem.Block + block, rest = pem.Decode(rest) + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatalf("ParseCertificate error %v", err) + } + t.Logf("Parsed Cert sub = %v iss = %v", c.Subject, c.Issuer) + certChain = append(certChain, c) + } + return certChain +} + +func loadCRL(t *testing.T, path string) *certificateListExt { + b, err := os.ReadFile(path) + if err != nil { + t.Fatalf("readFile(%v) failed err = %v", path, err) + } + crl, err := x509.ParseCRL(b) + if err != nil { + t.Fatalf("ParseCrl(%v) failed err = %v", path, err) + } + crlExt, err := parseCRLExtensions(crl) + if err != nil { + t.Fatalf("parseCRLExtensions(%v) failed err = %v", path, err) + } + crlExt.RawIssuer, err = extractCRLIssuer(b) + if err != nil { + 
t.Fatalf("extractCRLIssuer(%v) failed err= %v", path, err) + } + return crlExt +} + +func TestCachedCRL(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + tests := []struct { + desc string + val interface{} + ok bool + }{ + { + desc: "Valid", + val: &certificateListExt{ + CertList: &pkix.CertificateList{ + TBSCertList: pkix.TBSCertificateList{ + NextUpdate: time.Now().Add(time.Hour), + }, + }}, + ok: true, + }, + { + desc: "Expired", + val: &certificateListExt{ + CertList: &pkix.CertificateList{ + TBSCertList: pkix.TBSCertificateList{ + NextUpdate: time.Now().Add(-time.Hour), + }, + }}, + ok: false, + }, + { + desc: "Wrong Type", + val: "string", + ok: false, + }, + { + desc: "Empty", + val: nil, + ok: false, + }, + } + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + if tt.val != nil { + cache.Add(hex.EncodeToString([]byte(tt.desc)), tt.val) + } + _, ok := cachedCrl([]byte(tt.desc), cache) + if tt.ok != ok { + t.Errorf("Cache ok error expected %v vs %v", tt.ok, ok) + } + }) + } +} + +func TestGetIssuerCRLCache(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + tests := []struct { + desc string + rawIssuer []byte + certs []*x509.Certificate + }{ + { + desc: "Valid", + rawIssuer: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1].RawIssuer, + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + }, + { + desc: "Unverified", + rawIssuer: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1].RawIssuer, + }, + { + desc: "Not Found", + rawIssuer: []byte("not_found"), + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + cache.Purge() + _, err := fetchIssuerCRL(tt.rawIssuer, tt.certs, RevocationConfig{ + RootDir: testdata.Path("."), + Cache: cache, + }) + if err == nil && cache.Len() == 0 { + t.Error("Verified CRL not added to cache") + } + if err != nil && cache.Len() != 0 { + t.Error("Unverified CRL added to 
cache") + } + }) + } +} + +func TestVerifyCrl(t *testing.T) { + tampered := loadCRL(t, testdata.Path("crl/1.crl")) + // Change the signature so it won't verify + tampered.CertList.SignatureValue.Bytes[0]++ + + verifyTests := []struct { + desc string + crl *certificateListExt + certs []*x509.Certificate + cert *x509.Certificate + errWant string + }{ + { + desc: "Pass intermediate", + crl: loadCRL(t, testdata.Path("crl/1.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "", + }, + { + desc: "Pass leaf", + crl: loadCRL(t, testdata.Path("crl/2.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[2], + errWant: "", + }, + { + desc: "Fail wrong cert chain", + crl: loadCRL(t, testdata.Path("crl/3.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/revokedInt.pem"))[1], + errWant: "No certificates mached", + }, + { + desc: "Fail no certs", + crl: loadCRL(t, testdata.Path("crl/1.crl")), + certs: []*x509.Certificate{}, + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "No certificates mached", + }, + { + desc: "Fail Tampered signature", + crl: tampered, + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "verification failure", + }, + } + + for _, tt := range verifyTests { + t.Run(tt.desc, func(t *testing.T) { + err := verifyCRL(tt.crl, tt.cert.RawIssuer, tt.certs) + switch { + case tt.errWant == "" && err != nil: + t.Errorf("Valid CRL did not verify err = %v", err) + case tt.errWant != "" && err == nil: + t.Error("Invalid CRL verified") + case tt.errWant != "" && !strings.Contains(err.Error(), tt.errWant): + t.Errorf("fetchIssuerCRL(_, %v, %v, _) = %v; want Contains(%v)", tt.cert.RawIssuer, tt.certs, err, tt.errWant) + } + }) + } +} + +func TestRevokedCert(t 
*testing.T) { + revokedIntChain := makeChain(t, testdata.Path("crl/revokedInt.pem")) + revokedLeafChain := makeChain(t, testdata.Path("crl/revokedLeaf.pem")) + validChain := makeChain(t, testdata.Path("crl/unrevoked.pem")) + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + var revocationTests = []struct { + desc string + in tls.ConnectionState + revoked bool + allowUndetermined bool + }{ + { + desc: "Single unrevoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain}}, + revoked: false, + }, + { + desc: "Single revoked intermediate", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedIntChain}}, + revoked: true, + }, + { + desc: "Single revoked leaf", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedLeafChain}}, + revoked: true, + }, + { + desc: "Multi one revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain, revokedLeafChain}}, + revoked: false, + }, + { + desc: "Multi revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedLeafChain, revokedIntChain}}, + revoked: true, + }, + { + desc: "Multi unrevoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain, validChain}}, + revoked: false, + }, + { + desc: "Undetermined revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{ + {&x509.Certificate{CRLDistributionPoints: []string{"test"}}}, + }}, + revoked: true, + }, + { + desc: "Undetermined allowed", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{ + {&x509.Certificate{CRLDistributionPoints: []string{"test"}}}, + }}, + revoked: false, + allowUndetermined: true, + }, + } + + for _, tt := range revocationTests { + t.Run(tt.desc, func(t *testing.T) { + err := CheckRevocation(tt.in, RevocationConfig{ + RootDir: testdata.Path("crl"), + AllowUndetermined: tt.allowUndetermined, + Cache: cache, + }) + t.Logf("CheckRevocation err = %v", err) + if 
tt.revoked && err == nil { + t.Error("Revoked certificate chain was allowed") + } else if !tt.revoked && err != nil { + t.Error("Unrevoked certificate not allowed") + } + }) + } +} + +func setupTLSConn(t *testing.T) (net.Listener, *x509.Certificate, *ecdsa.PrivateKey) { + t.Helper() + templ := x509.Certificate{ + SerialNumber: big.NewInt(5), + BasicConstraintsValid: true, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + Subject: pkix.Name{CommonName: "test-cert"}, + KeyUsage: x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + IPAddresses: []net.IP{net.ParseIP("::1")}, + CRLDistributionPoints: []string{"http://static.corp.google.com/crl/campus-sln/borg"}, + } + + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("ecdsa.GenerateKey failed err = %v", err) + } + rawCert, err := x509.CreateCertificate(rand.Reader, &templ, &templ, key.Public(), key) + if err != nil { + t.Fatalf("x509.CreateCertificate failed err = %v", err) + } + cert, err := x509.ParseCertificate(rawCert) + if err != nil { + t.Fatalf("x509.ParseCertificate failed err = %v", err) + } + + srvCfg := tls.Config{ + Certificates: []tls.Certificate{ + { + Certificate: [][]byte{cert.Raw}, + PrivateKey: key, + }, + }, + } + l, err := tls.Listen("tcp6", "[::1]:0", &srvCfg) + if err != nil { + t.Fatalf("tls.Listen failed err = %v", err) + } + return l, cert, key +} + +// TestVerifyConnection will setup a client/server connection and check revocation in the real TLS dialer +func TestVerifyConnection(t *testing.T) { + lis, cert, key := setupTLSConn(t) + defer func() { + lis.Close() + }() + + var handshakeTests = []struct { + desc string + revoked []pkix.RevokedCertificate + success bool + }{ + { + desc: "Empty CRL", + revoked: []pkix.RevokedCertificate{}, + success: true, + }, + { + desc: "Revoked Cert", + revoked: []pkix.RevokedCertificate{ + { + SerialNumber: 
cert.SerialNumber, + RevocationTime: time.Now(), + }, + }, + success: false, + }, + } + for _, tt := range handshakeTests { + t.Run(tt.desc, func(t *testing.T) { + // Accept one connection. + go func() { + conn, err := lis.Accept() + if err != nil { + t.Errorf("tls.Accept failed err = %v", err) + } else { + conn.Write([]byte("Hello, World!")) + conn.Close() + } + }() + + dir, err := os.MkdirTemp("", "crl_dir") + if err != nil { + t.Fatalf("os.MkdirTemp failed err = %v", err) + } + defer os.RemoveAll(dir) + + crl, err := cert.CreateCRL(rand.Reader, key, tt.revoked, time.Now(), time.Now().Add(time.Hour)) + if err != nil { + t.Fatalf("templ.CreateCRL failed err = %v", err) + } + + err = os.WriteFile(path.Join(dir, fmt.Sprintf("%s.r0", x509NameHash(cert.Subject.ToRDNSequence()))), crl, 0777) + if err != nil { + t.Fatalf("os.WriteFile failed err = %v", err) + } + + cp := x509.NewCertPool() + cp.AddCert(cert) + cliCfg := tls.Config{ + RootCAs: cp, + VerifyConnection: func(cs tls.ConnectionState) error { + return CheckRevocation(cs, RevocationConfig{RootDir: dir}) + }, + } + conn, err := tls.Dial(lis.Addr().Network(), lis.Addr().String(), &cliCfg) + t.Logf("tls.Dial err = %v", err) + if tt.success && err != nil { + t.Errorf("Expected success got err = %v", err) + } + if !tt.success && err == nil { + t.Error("Expected error, but got success") + } + if err == nil { + conn.Close() + } + }) + } +} + +func TestIssuerNonPrintableString(t *testing.T) { + rawIssuer, err := hex.DecodeString("300c310a300806022a030c023a29") + if err != nil { + t.Fatalf("failed to decode issuer: %s", err) + } + _, err = fetchCRL(rawIssuer, RevocationConfig{RootDir: testdata.Path("crl")}) + if err != nil { + t.Fatalf("fetchCRL failed: %s", err) + } +} + +// TestCRLCacheExpirationReloading tests the basic expiration and reloading of a +// cached CRL. The setup places an empty CRL in the cache, and a corresponding +// CRL with a revocation in the CRL directory. 
We then validate the certificate +// to verify that the certificate is not revoked. Then, we modify the +// NextUpdate time to be in the past so that when we next check for revocation, +// the existing cache entry should be seen as expired, and the CRL in the +// directory showing `revokedInt.pem` as revoked will be loaded, resulting in +// the check returning `RevocationRevoked`. +func TestCRLCacheExpirationReloading(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("Creating cache failed") + } + + var certs = makeChain(t, testdata.Path("crl/revokedInt.pem")) + // Certs[1] has the same issuer as the revoked cert + rawIssuer := certs[1].RawIssuer + + // `3.crl`` revokes `revokedInt.pem` + crl := loadCRL(t, testdata.Path("crl/3.crl")) + // Modify the crl so that the cert is NOT revoked and add it to the cache + crl.CertList.TBSCertList.RevokedCertificates = nil + crl.CertList.TBSCertList.NextUpdate = time.Now().Add(time.Hour) + cache.Add(hex.EncodeToString(rawIssuer), crl) + var cfg = RevocationConfig{RootDir: testdata.Path("crl"), Cache: cache} + revocationStatus := checkChain(certs, cfg) + if revocationStatus != RevocationUnrevoked { + t.Fatalf("Certificate check should be RevocationUnrevoked, was %v", revocationStatus) + } + + // Modify the entry in the cache so that the cache will be refreshed + crl.CertList.TBSCertList.NextUpdate = time.Now() + cache.Add(hex.EncodeToString(rawIssuer), crl) + + revocationStatus = checkChain(certs, cfg) + if revocationStatus != RevocationRevoked { + t.Fatalf("A certificate should have been `RevocationRevoked` but was %v", revocationStatus) + } +} diff --git a/security/advancedtls/crl_test.go b/security/advancedtls/crl_test.go new file mode 100644 index 000000000000..021f10d35ae5 --- /dev/null +++ b/security/advancedtls/crl_test.go @@ -0,0 +1,776 @@ +// TODO(@gregorycooke) - Remove when only golang 1.19+ is supported +//go:build go1.19 + +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package advancedtls + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "fmt" + "math/big" + "net" + "os" + "path" + "strings" + "testing" + "time" + + lru "github.com/hashicorp/golang-lru" + "google.golang.org/grpc/security/advancedtls/testdata" +) + +func TestX509NameHash(t *testing.T) { + nameTests := []struct { + in pkix.Name + out string + }{ + { + in: pkix.Name{ + Country: []string{"US"}, + Organization: []string{"Example"}, + }, + out: "9cdd41ff", + }, + { + in: pkix.Name{ + Country: []string{"us"}, + Organization: []string{"example"}, + }, + out: "9cdd41ff", + }, + { + in: pkix.Name{ + Country: []string{" us"}, + Organization: []string{"example"}, + }, + out: "9cdd41ff", + }, + { + in: pkix.Name{ + Country: []string{"US"}, + Province: []string{"California"}, + Locality: []string{"Mountain View"}, + Organization: []string{"BoringSSL"}, + }, + out: "c24414d9", + }, + { + in: pkix.Name{ + Country: []string{"US"}, + Province: []string{"California"}, + Locality: []string{"Mountain View"}, + Organization: []string{"BoringSSL"}, + }, + out: "c24414d9", + }, + { + in: pkix.Name{ + SerialNumber: "87f4514475ba0a2b", + }, + out: "9dc713cd", + }, + { + in: pkix.Name{ + Country: []string{"US"}, + Province: []string{"California"}, + Locality: []string{"Mountain View"}, + 
Organization: []string{"Google LLC"}, + OrganizationalUnit: []string{"Production", "campus-sln"}, + CommonName: "Root CA (2021-02-02T07:30:36-08:00)", + }, + out: "0b35a562", + }, + { + in: pkix.Name{ + ExtraNames: []pkix.AttributeTypeAndValue{ + {Type: asn1.ObjectIdentifier{5, 5, 5, 5}, Value: "aaaa"}, + }, + }, + out: "eea339da", + }, + } + for _, tt := range nameTests { + t.Run(tt.in.String(), func(t *testing.T) { + h := x509NameHash(tt.in.ToRDNSequence()) + if h != tt.out { + t.Errorf("x509NameHash(%v): Got %v wanted %v", tt.in, h, tt.out) + } + }) + } +} + +func TestUnsupportedCRLs(t *testing.T) { + crlBytesSomeReasons := []byte(`-----BEGIN X509 CRL----- +MIIEeDCCA2ACAQEwDQYJKoZIhvcNAQELBQAwQjELMAkGA1UEBhMCVVMxHjAcBgNV +BAoTFUdvb2dsZSBUcnVzdCBTZXJ2aWNlczETMBEGA1UEAxMKR1RTIENBIDFPMRcN +MjEwNDI2MTI1OTQxWhcNMjEwNTA2MTE1OTQwWjCCAn0wIgIRAPOOG3L4VLC7CAAA +AABxQgEXDTIxMDQxOTEyMTgxOFowIQIQUK0UwBZkVdQIAAAAAHFCBRcNMjEwNDE5 +MTIxODE4WjAhAhBRIXBJaKoQkQgAAAAAcULHFw0yMTA0MjAxMjE4MTdaMCICEQCv +qQWUq5UxmQgAAAAAcULMFw0yMTA0MjAxMjE4MTdaMCICEQDdv5k1kKwKTQgAAAAA +cUOQFw0yMTA0MjExMjE4MTZaMCICEQDGIEfR8N9sEAgAAAAAcUOWFw0yMTA0MjEx +MjE4MThaMCECEBHgbLXlj5yUCAAAAABxQ/IXDTIxMDQyMTIzMDAyNlowIQIQE1wT +2GGYqKwIAAAAAHFD7xcNMjEwNDIxMjMwMDI5WjAiAhEAo/bSyDjpVtsIAAAAAHFE +txcNMjEwNDIyMjMwMDI3WjAhAhARdCrSrHE0dAgAAAAAcUS/Fw0yMTA0MjIyMzAw +MjhaMCECEHONohfWn3wwCAAAAABxRX8XDTIxMDQyMzIzMDAyOVowIgIRAOYkiUPA +os4vCAAAAABxRYgXDTIxMDQyMzIzMDAyOFowIQIQRNTow5Eg2gEIAAAAAHFGShcN +MjEwNDI0MjMwMDI2WjAhAhBX32dH4/WQ6AgAAAAAcUZNFw0yMTA0MjQyMzAwMjZa +MCICEQDHnUM1vsaP/wgAAAAAcUcQFw0yMTA0MjUyMzAwMjZaMCECEEm5rvmL8sj6 +CAAAAABxRxQXDTIxMDQyNTIzMDAyN1owIQIQW16OQs4YQYkIAAAAAHFIABcNMjEw +NDI2MTI1NDA4WjAhAhAhSohpYsJtDQgAAAAAcUgEFw0yMTA0MjYxMjU0MDlaoGkw +ZzAfBgNVHSMEGDAWgBSY0fhuEOvPm+xgnxiQG6DrfQn9KzALBgNVHRQEBAICBngw +NwYDVR0cAQH/BC0wK6AmoCSGImh0dHA6Ly9jcmwucGtpLmdvb2cvR1RTMU8xY29y +ZS5jcmyBAf8wDQYJKoZIhvcNAQELBQADggEBADPBXbxVxMJ1HC7btXExRUpJHUlU +YbeCZGx6zj5F8pkopbmpV7cpewwhm848Fx4VaFFppZQZd92O08daEC6aEqoug4qF 
+z6ZrOLzhuKfpW8E93JjgL91v0FYN7iOcT7+ERKCwVEwEkuxszxs7ggW6OJYJNvHh +priIdmcPoiQ3ZrIRH0vE3BfUcNXnKFGATWuDkiRI0I4A5P7NiOf+lAuGZet3/eom +0chgts6sdau10GfeUpHUd4f8e93cS/QeLeG16z7LC8vRLstU3m3vrknpZbdGqSia +97w66mqcnQh9V0swZiEnVLmLufaiuDZJ+6nUzSvLqBlb/ei3T/tKV0BoKJA= +-----END X509 CRL-----`) + + crlBytesIndirect := []byte(`-----BEGIN X509 CRL----- +MIIDGjCCAgICAQEwDQYJKoZIhvcNAQELBQAwdjELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCkNhbGlmb3JuaWExFDASBgNVBAoTC1Rlc3RpbmcgTHRkMSowKAYDVQQLEyFU +ZXN0aW5nIEx0ZCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxEDAOBgNVBAMTB1Rlc3Qg +Q0EXDTIxMDExNjAyMjAxNloXDTIxMDEyMDA2MjAxNlowgfIwbAIBAhcNMjEwMTE2 +MDIyMDE2WjBYMAoGA1UdFQQDCgEEMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQG +EwNVU0ExDTALBgNVBAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0 +MTAgAgEDFw0yMTAxMTYwMjIwMTZaMAwwCgYDVR0VBAMKAQEwYAIBBBcNMjEwMTE2 +MDIyMDE2WjBMMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQGEwNVU0ExDTALBgNV +BAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0MqBjMGEwHwYDVR0j +BBgwFoAURJSDWAOfhGCryBjl8dsQjBitl3swCgYDVR0UBAMCAQEwMgYDVR0cAQH/ +BCgwJqAhoB+GHWh0dHA6Ly9jcmxzLnBraS5nb29nL3Rlc3QuY3JshAH/MA0GCSqG +SIb3DQEBCwUAA4IBAQBVXX67mr2wFPmEWCe6mf/wFnPl3xL6zNOl96YJtsd7ulcS +TEbdJpaUnWFQ23+Tpzdj/lI2aQhTg5Lvii3o+D8C5r/Jc5NhSOtVJJDI/IQLh4pG +NgGdljdbJQIT5D2Z71dgbq1ocxn8DefZIJjO3jp8VnAm7AIMX2tLTySzD2MpMeMq +XmcN4lG1e4nx+xjzp7MySYO42NRY3LkphVzJhu3dRBYhBKViRJxw9hLttChitJpF +6Kh6a0QzrEY/QDJGhE1VrAD2c5g/SKnHPDVoCWo4ACIICi76KQQSIWfIdp4W/SY3 +qsSIp8gfxSyzkJP+Ngkm2DdLjlJQCZ9R0MZP9Xj4 +-----END X509 CRL-----`) + + var tests = []struct { + desc string + in []byte + }{ + { + desc: "some reasons", + in: crlBytesSomeReasons, + }, + { + desc: "indirect", + in: crlBytesIndirect, + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + crl, err := parseRevocationList(tt.in) + if err != nil { + t.Fatal(err) + } + if _, err := parseCRLExtensions(crl); err == nil { + t.Error("expected error got ok") + } + }) + } +} + +func TestCheckCertRevocation(t *testing.T) { + dummyCrlFile := []byte(`-----BEGIN X509 CRL----- 
+MIIDGjCCAgICAQEwDQYJKoZIhvcNAQELBQAwdjELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCkNhbGlmb3JuaWExFDASBgNVBAoTC1Rlc3RpbmcgTHRkMSowKAYDVQQLEyFU +ZXN0aW5nIEx0ZCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxEDAOBgNVBAMTB1Rlc3Qg +Q0EXDTIxMDExNjAyMjAxNloXDTIxMDEyMDA2MjAxNlowgfIwbAIBAhcNMjEwMTE2 +MDIyMDE2WjBYMAoGA1UdFQQDCgEEMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQG +EwNVU0ExDTALBgNVBAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0 +MTAgAgEDFw0yMTAxMTYwMjIwMTZaMAwwCgYDVR0VBAMKAQEwYAIBBBcNMjEwMTE2 +MDIyMDE2WjBMMEoGA1UdHQEB/wRAMD6kPDA6MQwwCgYDVQQGEwNVU0ExDTALBgNV +BAcTBGhlcmUxCzAJBgNVBAoTAnVzMQ4wDAYDVQQDEwVUZXN0MqBjMGEwHwYDVR0j +BBgwFoAURJSDWAOfhGCryBjl8dsQjBitl3swCgYDVR0UBAMCAQEwMgYDVR0cAQH/ +BCgwJqAhoB+GHWh0dHA6Ly9jcmxzLnBraS5nb29nL3Rlc3QuY3JshAH/MA0GCSqG +SIb3DQEBCwUAA4IBAQBVXX67mr2wFPmEWCe6mf/wFnPl3xL6zNOl96YJtsd7ulcS +TEbdJpaUnWFQ23+Tpzdj/lI2aQhTg5Lvii3o+D8C5r/Jc5NhSOtVJJDI/IQLh4pG +NgGdljdbJQIT5D2Z71dgbq1ocxn8DefZIJjO3jp8VnAm7AIMX2tLTySzD2MpMeMq +XmcN4lG1e4nx+xjzp7MySYO42NRY3LkphVzJhu3dRBYhBKViRJxw9hLttChitJpF +6Kh6a0QzrEY/QDJGhE1VrAD2c5g/SKnHPDVoCWo4ACIICi76KQQSIWfIdp4W/SY3 +qsSIp8gfxSyzkJP+Ngkm2DdLjlJQCZ9R0MZP9Xj4 +-----END X509 CRL-----`) + crl, err := parseRevocationList(dummyCrlFile) + if err != nil { + t.Fatalf("parseRevocationList(dummyCrlFile) failed: %v", err) + } + crlExt := &certificateListExt{CertList: crl} + var crlIssuer pkix.Name = crl.Issuer + + var revocationTests = []struct { + desc string + in x509.Certificate + revoked RevocationStatus + }{ + { + desc: "Single revoked", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test1", + }, + SerialNumber: big.NewInt(2), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationRevoked, + }, + { + desc: "Revoked no entry issuer", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test1", + }, + SerialNumber: big.NewInt(3), + 
CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationRevoked, + }, + { + desc: "Revoked new entry issuer", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test2", + }, + SerialNumber: big.NewInt(4), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationRevoked, + }, + { + desc: "Single unrevoked", + in: x509.Certificate{ + Issuer: pkix.Name{ + Country: []string{"USA"}, + Locality: []string{"here"}, + Organization: []string{"us"}, + CommonName: "Test2", + }, + SerialNumber: big.NewInt(1), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationUnrevoked, + }, + { + desc: "Single unrevoked Issuer", + in: x509.Certificate{ + Issuer: crlIssuer, + SerialNumber: big.NewInt(2), + CRLDistributionPoints: []string{"test"}, + }, + revoked: RevocationUnrevoked, + }, + } + + for _, tt := range revocationTests { + rawIssuer, err := asn1.Marshal(tt.in.Issuer.ToRDNSequence()) + if err != nil { + t.Fatalf("asn1.Marshal(%v) failed: %v", tt.in.Issuer.ToRDNSequence(), err) + } + tt.in.RawIssuer = rawIssuer + t.Run(tt.desc, func(t *testing.T) { + rev, err := checkCertRevocation(&tt.in, crlExt) + if err != nil { + t.Errorf("checkCertRevocation(%v) err = %v", tt.in.Issuer, err) + } else if rev != tt.revoked { + t.Errorf("checkCertRevocation(%v(%v)) returned %v wanted %v", + tt.in.Issuer, tt.in.SerialNumber, rev, tt.revoked) + } + }) + } +} + +func makeChain(t *testing.T, name string) []*x509.Certificate { + t.Helper() + + certChain := make([]*x509.Certificate, 0) + + rest, err := os.ReadFile(name) + if err != nil { + t.Fatalf("os.ReadFile(%v) failed %v", name, err) + } + for len(rest) > 0 { + var block *pem.Block + block, rest = pem.Decode(rest) + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatalf("ParseCertificate error %v", err) + } + t.Logf("Parsed Cert sub = %v iss = %v", c.Subject, c.Issuer) + certChain = 
append(certChain, c) + } + return certChain +} + +func loadCRL(t *testing.T, path string) *certificateListExt { + b, err := os.ReadFile(path) + if err != nil { + t.Fatalf("readFile(%v) failed err = %v", path, err) + } + crl, err := parseRevocationList(b) + if err != nil { + t.Fatalf("parseCrl(%v) failed err = %v", path, err) + } + crlExt, err := parseCRLExtensions(crl) + if err != nil { + t.Fatalf("parseCRLExtensions(%v) failed err = %v", path, err) + } + crlExt.RawIssuer, err = extractCRLIssuer(b) + if err != nil { + t.Fatalf("extractCRLIssuer(%v) failed err= %v", path, err) + } + return crlExt +} + +func TestCachedCRL(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + tests := []struct { + desc string + val interface{} + ok bool + }{ + { + desc: "Valid", + val: &certificateListExt{ + CertList: &x509.RevocationList{ + NextUpdate: time.Now().Add(time.Hour), + }}, + ok: true, + }, + { + desc: "Expired", + val: &certificateListExt{ + CertList: &x509.RevocationList{ + NextUpdate: time.Now().Add(-time.Hour), + }}, + ok: false, + }, + { + desc: "Wrong Type", + val: "string", + ok: false, + }, + { + desc: "Empty", + val: nil, + ok: false, + }, + } + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + if tt.val != nil { + cache.Add(hex.EncodeToString([]byte(tt.desc)), tt.val) + } + _, ok := cachedCrl([]byte(tt.desc), cache) + if tt.ok != ok { + t.Errorf("Cache ok error expected %v vs %v", tt.ok, ok) + } + }) + } +} + +func TestGetIssuerCRLCache(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + tests := []struct { + desc string + rawIssuer []byte + certs []*x509.Certificate + }{ + { + desc: "Valid", + rawIssuer: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1].RawIssuer, + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + }, + { + desc: "Unverified", + rawIssuer: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1].RawIssuer, + }, + { + 
desc: "Not Found", + rawIssuer: []byte("not_found"), + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + cache.Purge() + _, err := fetchIssuerCRL(tt.rawIssuer, tt.certs, RevocationConfig{ + RootDir: testdata.Path("."), + Cache: cache, + }) + if err == nil && cache.Len() == 0 { + t.Error("Verified CRL not added to cache") + } + if err != nil && cache.Len() != 0 { + t.Error("Unverified CRL added to cache") + } + }) + } +} + +func TestVerifyCrl(t *testing.T) { + tampered := loadCRL(t, testdata.Path("crl/1.crl")) + // Change the signature so it won't verify + tampered.CertList.Signature[0]++ + + verifyTests := []struct { + desc string + crl *certificateListExt + certs []*x509.Certificate + cert *x509.Certificate + errWant string + }{ + { + desc: "Pass intermediate", + crl: loadCRL(t, testdata.Path("crl/1.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "", + }, + { + desc: "Pass leaf", + crl: loadCRL(t, testdata.Path("crl/2.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[2], + errWant: "", + }, + { + desc: "Fail wrong cert chain", + crl: loadCRL(t, testdata.Path("crl/3.crl")), + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/revokedInt.pem"))[1], + errWant: "No certificates mached", + }, + { + desc: "Fail no certs", + crl: loadCRL(t, testdata.Path("crl/1.crl")), + certs: []*x509.Certificate{}, + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "No certificates mached", + }, + { + desc: "Fail Tampered signature", + crl: tampered, + certs: makeChain(t, testdata.Path("crl/unrevoked.pem")), + cert: makeChain(t, testdata.Path("crl/unrevoked.pem"))[1], + errWant: "verification failure", + }, + } + + for _, tt := range verifyTests { + t.Run(tt.desc, func(t *testing.T) { + err := verifyCRL(tt.crl, tt.cert.RawIssuer, 
tt.certs) + switch { + case tt.errWant == "" && err != nil: + t.Errorf("Valid CRL did not verify err = %v", err) + case tt.errWant != "" && err == nil: + t.Error("Invalid CRL verified") + case tt.errWant != "" && !strings.Contains(err.Error(), tt.errWant): + t.Errorf("fetchIssuerCRL(_, %v, %v, _) = %v; want Contains(%v)", tt.cert.RawIssuer, tt.certs, err, tt.errWant) + } + }) + } +} + +func TestRevokedCert(t *testing.T) { + revokedIntChain := makeChain(t, testdata.Path("crl/revokedInt.pem")) + revokedLeafChain := makeChain(t, testdata.Path("crl/revokedLeaf.pem")) + validChain := makeChain(t, testdata.Path("crl/unrevoked.pem")) + cache, err := lru.New(5) + if err != nil { + t.Fatalf("lru.New: err = %v", err) + } + + var revocationTests = []struct { + desc string + in tls.ConnectionState + revoked bool + allowUndetermined bool + }{ + { + desc: "Single unrevoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain}}, + revoked: false, + }, + { + desc: "Single revoked intermediate", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedIntChain}}, + revoked: true, + }, + { + desc: "Single revoked leaf", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedLeafChain}}, + revoked: true, + }, + { + desc: "Multi one revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain, revokedLeafChain}}, + revoked: false, + }, + { + desc: "Multi revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{revokedLeafChain, revokedIntChain}}, + revoked: true, + }, + { + desc: "Multi unrevoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{validChain, validChain}}, + revoked: false, + }, + { + desc: "Undetermined revoked", + in: tls.ConnectionState{VerifiedChains: [][]*x509.Certificate{ + {&x509.Certificate{CRLDistributionPoints: []string{"test"}}}, + }}, + revoked: true, + }, + { + desc: "Undetermined allowed", + in: tls.ConnectionState{VerifiedChains: 
[][]*x509.Certificate{ + {&x509.Certificate{CRLDistributionPoints: []string{"test"}}}, + }}, + revoked: false, + allowUndetermined: true, + }, + } + + for _, tt := range revocationTests { + t.Run(tt.desc, func(t *testing.T) { + err := CheckRevocation(tt.in, RevocationConfig{ + RootDir: testdata.Path("crl"), + AllowUndetermined: tt.allowUndetermined, + Cache: cache, + }) + t.Logf("CheckRevocation err = %v", err) + if tt.revoked && err == nil { + t.Error("Revoked certificate chain was allowed") + } else if !tt.revoked && err != nil { + t.Error("Unrevoked certificate not allowed") + } + }) + } +} + +func setupTLSConn(t *testing.T) (net.Listener, *x509.Certificate, *ecdsa.PrivateKey) { + t.Helper() + templ := x509.Certificate{ + SerialNumber: big.NewInt(5), + BasicConstraintsValid: true, + NotBefore: time.Now().Add(-time.Hour), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + Subject: pkix.Name{CommonName: "test-cert"}, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + IPAddresses: []net.IP{net.ParseIP("::1")}, + CRLDistributionPoints: []string{"http://static.corp.google.com/crl/campus-sln/borg"}, + } + + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("ecdsa.GenerateKey failed err = %v", err) + } + rawCert, err := x509.CreateCertificate(rand.Reader, &templ, &templ, key.Public(), key) + if err != nil { + t.Fatalf("x509.CreateCertificate failed err = %v", err) + } + cert, err := x509.ParseCertificate(rawCert) + if err != nil { + t.Fatalf("x509.ParseCertificate failed err = %v", err) + } + + srvCfg := tls.Config{ + Certificates: []tls.Certificate{ + { + Certificate: [][]byte{cert.Raw}, + PrivateKey: key, + }, + }, + } + l, err := tls.Listen("tcp6", "[::1]:0", &srvCfg) + if err != nil { + t.Fatalf("tls.Listen failed err = %v", err) + } + return l, cert, key +} + +// TestVerifyConnection will setup a client/server connection 
and check revocation in the real TLS dialer +func TestVerifyConnection(t *testing.T) { + lis, cert, key := setupTLSConn(t) + defer func() { + lis.Close() + }() + + var handshakeTests = []struct { + desc string + revoked []pkix.RevokedCertificate + success bool + }{ + { + desc: "Empty CRL", + revoked: []pkix.RevokedCertificate{}, + success: true, + }, + { + desc: "Revoked Cert", + revoked: []pkix.RevokedCertificate{ + { + SerialNumber: cert.SerialNumber, + RevocationTime: time.Now(), + }, + }, + success: false, + }, + } + for _, tt := range handshakeTests { + t.Run(tt.desc, func(t *testing.T) { + // Accept one connection. + go func() { + conn, err := lis.Accept() + if err != nil { + t.Errorf("tls.Accept failed err = %v", err) + } else { + conn.Write([]byte("Hello, World!")) + conn.Close() + } + }() + + dir, err := os.MkdirTemp("", "crl_dir") + if err != nil { + t.Fatalf("os.MkdirTemp failed err = %v", err) + } + defer os.RemoveAll(dir) + + template := &x509.RevocationList{ + RevokedCertificates: tt.revoked, + ThisUpdate: time.Now(), + NextUpdate: time.Now().Add(time.Hour), + Number: big.NewInt(1), + } + crl, err := x509.CreateRevocationList(rand.Reader, template, cert, key) + if err != nil { + t.Fatalf("templ.CreateRevocationList failed err = %v", err) + } + + err = os.WriteFile(path.Join(dir, fmt.Sprintf("%s.r0", x509NameHash(cert.Subject.ToRDNSequence()))), crl, 0777) + if err != nil { + t.Fatalf("os.WriteFile failed err = %v", err) + } + + cp := x509.NewCertPool() + cp.AddCert(cert) + cliCfg := tls.Config{ + RootCAs: cp, + VerifyConnection: func(cs tls.ConnectionState) error { + return CheckRevocation(cs, RevocationConfig{RootDir: dir}) + }, + } + conn, err := tls.Dial(lis.Addr().Network(), lis.Addr().String(), &cliCfg) + t.Logf("tls.Dial err = %v", err) + if tt.success && err != nil { + t.Errorf("Expected success got err = %v", err) + } + if !tt.success && err == nil { + t.Error("Expected error, but got success") + } + if err == nil { + conn.Close() + } + }) + } 
+} + +func TestIssuerNonPrintableString(t *testing.T) { + rawIssuer, err := hex.DecodeString("300c310a300806022a030c023a29") + if err != nil { + t.Fatalf("failed to decode issuer: %s", err) + } + _, err = fetchCRL(rawIssuer, RevocationConfig{RootDir: testdata.Path("crl")}) + if err != nil { + t.Fatalf("fetchCRL failed: %s", err) + } +} + +// TestCRLCacheExpirationReloading tests the basic expiration and reloading of a +// cached CRL. The setup places an empty CRL in the cache, and a corresponding +// CRL with a revocation in the CRL directory. We then validate the certificate +// to verify that the certificate is not revoked. Then, we modify the +// NextUpdate time to be in the past so that when we next check for revocation, +// the existing cache entry should be seen as expired, and the CRL in the +// directory showing `revokedInt.pem` as revoked will be loaded, resulting in +// the check returning `RevocationRevoked`. +func TestCRLCacheExpirationReloading(t *testing.T) { + cache, err := lru.New(5) + if err != nil { + t.Fatalf("Creating cache failed") + } + + var certs = makeChain(t, testdata.Path("crl/revokedInt.pem")) + // Certs[1] has the same issuer as the revoked cert + rawIssuer := certs[1].RawIssuer + + // `3.crl`` revokes `revokedInt.pem` + crl := loadCRL(t, testdata.Path("crl/3.crl")) + // Modify the crl so that the cert is NOT revoked and add it to the cache + crl.CertList.RevokedCertificates = nil + crl.CertList.NextUpdate = time.Now().Add(time.Hour) + cache.Add(hex.EncodeToString(rawIssuer), crl) + var cfg = RevocationConfig{RootDir: testdata.Path("crl"), Cache: cache} + revocationStatus := checkChain(certs, cfg) + if revocationStatus != RevocationUnrevoked { + t.Fatalf("Certificate check should be RevocationUnrevoked, was %v", revocationStatus) + } + + // Modify the entry in the cache so that the cache will be refreshed + crl.CertList.NextUpdate = time.Now() + cache.Add(hex.EncodeToString(rawIssuer), crl) + + revocationStatus = checkChain(certs, cfg) 
+ if revocationStatus != RevocationRevoked { + t.Fatalf("A certificate should have been `RevocationRevoked` but was %v", revocationStatus) + } +} diff --git a/security/advancedtls/examples/go.mod b/security/advancedtls/examples/go.mod index 936aa476893b..f59f6f900251 100644 --- a/security/advancedtls/examples/go.mod +++ b/security/advancedtls/examples/go.mod @@ -1,11 +1,21 @@ module google.golang.org/grpc/security/advancedtls/examples -go 1.15 +go 1.17 require ( - google.golang.org/grpc v1.33.1 - google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b - google.golang.org/grpc/security/advancedtls v0.0.0-20201112215255-90f1b3ee835b + google.golang.org/grpc v1.54.0 + google.golang.org/grpc/examples v0.0.0-20230418213844-0ed709c4a71d + google.golang.org/grpc/security/advancedtls v0.0.0-20230418213844-0ed709c4a71d +) + +require ( + github.com/golang/protobuf v1.5.3 // indirect + golang.org/x/crypto v0.8.0 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/protobuf v1.30.0 // indirect ) replace google.golang.org/grpc => ../../.. 
diff --git a/security/advancedtls/examples/go.sum b/security/advancedtls/examples/go.sum index 519267dbc278..91c31e699877 100644 --- a/security/advancedtls/examples/go.sum +++ b/security/advancedtls/examples/go.sum @@ -1,71 +1,21 @@ -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 
-github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/security/advancedtls/go.mod b/security/advancedtls/go.mod index be35029503da..928f86fff832 100644 --- a/security/advancedtls/go.mod +++ b/security/advancedtls/go.mod @@ -1,13 
+1,23 @@ module google.golang.org/grpc/security/advancedtls -go 1.14 +go 1.17 require ( - github.com/google/go-cmp v0.5.1 // indirect - google.golang.org/grpc v1.31.0 + github.com/hashicorp/golang-lru v0.5.4 + golang.org/x/crypto v0.8.0 + google.golang.org/grpc v1.54.0 google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b ) +require ( + github.com/golang/protobuf v1.5.3 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/protobuf v1.30.0 // indirect +) + replace google.golang.org/grpc => ../../ replace google.golang.org/grpc/examples => ../../examples diff --git a/security/advancedtls/go.sum b/security/advancedtls/go.sum index 519267dbc278..2766a3d8223a 100644 --- a/security/advancedtls/go.sum +++ b/security/advancedtls/go.sum @@ -1,71 +1,22 @@ -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= diff --git a/security/advancedtls/internal/testutils/testutils.go b/security/advancedtls/internal/testutils/testutils.go index a2c048882b7a..1bc0dc3bf4e2 100644 --- a/security/advancedtls/internal/testutils/testutils.go +++ b/security/advancedtls/internal/testutils/testutils.go @@ -22,7 +22,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" + "os" "google.golang.org/grpc/security/advancedtls/testdata" ) @@ -58,7 +58,7 @@ type CertStore struct { } func readTrustCert(fileName string) (*x509.CertPool, error) { - trustData, err := ioutil.ReadFile(fileName) + trustData, err := os.ReadFile(fileName) if err != nil { return nil, err } diff --git a/security/advancedtls/sni.go b/security/advancedtls/sni.go index 120acf2b376d..3e7befb1f904 100644 --- a/security/advancedtls/sni.go +++ b/security/advancedtls/sni.go @@ -1,5 +1,3 @@ -// +build !appengine,go1.14 - /* * * Copyright 2020 gRPC authors. diff --git a/security/advancedtls/sni_beforego114.go b/security/advancedtls/sni_beforego114.go deleted file mode 100644 index 26a09b988491..000000000000 --- a/security/advancedtls/sni_beforego114.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build !appengine,!go1.14 - -/* - * - * Copyright 2020 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package advancedtls - -import ( - "crypto/tls" - "fmt" -) - -// buildGetCertificates returns the first cert contained in ServerOptions for -// non-appengine builds before version 1.4. -func buildGetCertificates(clientHello *tls.ClientHelloInfo, o *ServerOptions) (*tls.Certificate, error) { - if o.IdentityOptions.GetIdentityCertificatesForServer == nil { - return nil, fmt.Errorf("function GetCertificates must be specified") - } - certificates, err := o.IdentityOptions.GetIdentityCertificatesForServer(clientHello) - if err != nil { - return nil, err - } - if len(certificates) == 0 { - return nil, fmt.Errorf("no certificates configured") - } - return certificates[0], nil -} diff --git a/security/advancedtls/testdata/crl/0b35a562.r0 b/security/advancedtls/testdata/crl/0b35a562.r0 new file mode 120000 index 000000000000..1a84eabdfc72 --- /dev/null +++ b/security/advancedtls/testdata/crl/0b35a562.r0 @@ -0,0 +1 @@ +5.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/0b35a562.r1 b/security/advancedtls/testdata/crl/0b35a562.r1 new file mode 120000 index 000000000000..6e6f10978918 --- /dev/null +++ b/security/advancedtls/testdata/crl/0b35a562.r1 @@ -0,0 +1 @@ +1.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/1.crl b/security/advancedtls/testdata/crl/1.crl new file mode 100644 index 000000000000..5b12ded4a66f --- /dev/null +++ 
b/security/advancedtls/testdata/crl/1.crl @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBYDCCAQYCAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAyMS0wMi0wMlQwNzozMDozNi0wODowMCkX +DTIxMDIwMjE1MzAzNloXDTIxMDIwOTE1MzAzNlqgLzAtMB8GA1UdIwQYMBaAFPQN +tnCIBcG4ReQgoVi0kPgTROseMAoGA1UdFAQDAgEAMAoGCCqGSM49BAMCA0gAMEUC +IQDB9WEPBPHEo5xjCv8CT9okockJJnkLDOus6FypVLqj5QIgYw9/PYLwb41/Uc+4 +LLTAsfdDWh7xBJmqvVQglMoJOEc= +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/1ab871c8.r0 b/security/advancedtls/testdata/crl/1ab871c8.r0 new file mode 120000 index 000000000000..f2cd877e7edb --- /dev/null +++ b/security/advancedtls/testdata/crl/1ab871c8.r0 @@ -0,0 +1 @@ +2.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/2.crl b/security/advancedtls/testdata/crl/2.crl new file mode 100644 index 000000000000..5ca9afd71419 --- /dev/null +++ b/security/advancedtls/testdata/crl/2.crl @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBYDCCAQYCAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjbm9kZSBDQSAoMjAyMS0wMi0wMlQwNzozMDozNi0wODowMCkX +DTIxMDIwMjE1MzAzNloXDTIxMDIwOTE1MzAzNlqgLzAtMB8GA1UdIwQYMBaAFBjo +V5Jnk/gp1k7fmWwkvTk/cF/IMAoGA1UdFAQDAgEAMAoGCCqGSM49BAMCA0gAMEUC +IQDgjA1Vj/pNFtNRL0vFEdapmFoArHM2+rn4IiP8jYLsCAIgAj2KEHbbtJ3zl5XP +WVW6ZyW7r3wIX+Bt3vLJWPrQtf8= +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/2f11f022.r0 b/security/advancedtls/testdata/crl/2f11f022.r0 new file mode 100644 index 000000000000..e570f17ee2a5 --- /dev/null +++ b/security/advancedtls/testdata/crl/2f11f022.r0 @@ -0,0 +1,7 @@ +-----BEGIN X509 CRL----- +MIHnMFICAQEwDQYJKoZIhvcNAQEMBQAwDDEKMAgGAioDDAI6KRcNMDkxMTEwMjMw 
+MDAwWhcNMDkxMTExMDAwMDAwWqASMBAwDgYDVR0jBAcwBYADAQIDMA0GCSqGSIb3 +DQEBDAUAA4GBAMl2sjOjtOQ+OCsRyjM0IvqTn7lmdGJMvpYAym367JBamJPCbYrL +MifCjCA1ra7gG0MweZbpm4SG2YLakwi1/B+XhApQ5VVv5SwDn6Yy5zr9ePLEF7Iy +sP86e9s5XfOusLTW+Spre8q1vi7pJrRvUxhJGuUuLoM6Uhvh65ViilDJ +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/3.crl b/security/advancedtls/testdata/crl/3.crl new file mode 100644 index 000000000000..d37ad2247f59 --- /dev/null +++ b/security/advancedtls/testdata/crl/3.crl @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBiDCCAS8CAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAyMS0wMi0wMlQwNzozMTo1NC0wODowMCkX +DTIxMDIwMjE1MzE1NFoXDTIxMDIwOTE1MzE1NFowJzAlAhQAroEYW855BRqTrlov +5cBCGvkutxcNMjEwMjAyMTUzMTU0WqAvMC0wHwYDVR0jBBgwFoAUeq/TQ959KbWk +/um08jSTXogXpWUwCgYDVR0UBAMCAQEwCgYIKoZIzj0EAwIDRwAwRAIgaSOIhJDg +wOLYlbXkmxW0cqy/AfOUNYbz5D/8/FfvhosCICftg7Vzlu0Nh83jikyjy+wtkiJt +ZYNvGFQ3Sp2L3A9e +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/4.crl b/security/advancedtls/testdata/crl/4.crl new file mode 100644 index 000000000000..d4ee6f7cf186 --- /dev/null +++ b/security/advancedtls/testdata/crl/4.crl @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBYDCCAQYCAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjbm9kZSBDQSAoMjAyMS0wMi0wMlQwNzozMTo1NC0wODowMCkX +DTIxMDIwMjE1MzE1NFoXDTIxMDIwOTE1MzE1NFqgLzAtMB8GA1UdIwQYMBaAFIVn +8tIFgZpIdhomgYJ2c5ULLzpSMAoGA1UdFAQDAgEAMAoGCCqGSM49BAMCA0gAMEUC +ICupTvOqgAyRa1nn7+Pe/1vvlJPAQ8gUfTQsQ6XX3v6oAiEA08B2PsK6aTEwzjry +pXqhlUNZFzgaXrVVQuEJbyJ1qoU= +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/5.crl b/security/advancedtls/testdata/crl/5.crl new file mode 100644 index 
000000000000..d1c24f0f25a3 --- /dev/null +++ b/security/advancedtls/testdata/crl/5.crl @@ -0,0 +1,10 @@ +-----BEGIN X509 CRL----- +MIIBXzCCAQYCAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAyMS0wMi0wMlQwNzozMjo1Ny0wODowMCkX +DTIxMDIwMjE1MzI1N1oXDTIxMDIwOTE1MzI1N1qgLzAtMB8GA1UdIwQYMBaAFN+g +xTAtSTlb5Qqvrbp4rZtsaNzqMAoGA1UdFAQDAgEAMAoGCCqGSM49BAMCA0cAMEQC +IHrRKjieY7w7gxvpkJAdszPZBlaSSp/c9wILutBTy7SyAiAwhaHfgas89iRfaBs2 +EhGIeK39A+kSzqu6qEQBHpK36g== +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/6.crl b/security/advancedtls/testdata/crl/6.crl new file mode 100644 index 000000000000..87ef378f6aba --- /dev/null +++ b/security/advancedtls/testdata/crl/6.crl @@ -0,0 +1,11 @@ +-----BEGIN X509 CRL----- +MIIBiDCCAS8CAQEwCgYIKoZIzj0EAwIwgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQI +EwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpH +b29nbGUgTExDMSYwEQYDVQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNs +bjEsMCoGA1UEAxMjbm9kZSBDQSAoMjAyMS0wMi0wMlQwNzozMjo1Ny0wODowMCkX +DTIxMDIwMjE1MzI1N1oXDTIxMDIwOTE1MzI1N1owJzAlAhQAxSe/pGmyvzN7mxm5 +6ZJTYUXYuhcNMjEwMjAyMTUzMjU3WqAvMC0wHwYDVR0jBBgwFoAUpZ30UJXB4lI9 +j2SzodCtRFckrRcwCgYDVR0UBAMCAQEwCgYIKoZIzj0EAwIDRwAwRAIgRg3u7t3b +oyV5FhMuGGzWnfIwnKclpT8imnp8tEN253sCIFUY7DjiDohwu4Zup3bWs1OaZ3q3 +cm+j0H/oe8zzCAgp +-----END X509 CRL----- diff --git a/security/advancedtls/testdata/crl/71eac5a2.r0 b/security/advancedtls/testdata/crl/71eac5a2.r0 new file mode 120000 index 000000000000..9f37924cae0c --- /dev/null +++ b/security/advancedtls/testdata/crl/71eac5a2.r0 @@ -0,0 +1 @@ +4.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/7a1799af.r0 b/security/advancedtls/testdata/crl/7a1799af.r0 new file mode 120000 index 000000000000..f34df5b59c01 --- /dev/null +++ b/security/advancedtls/testdata/crl/7a1799af.r0 @@ -0,0 +1 @@ +3.crl \ No 
newline at end of file diff --git a/security/advancedtls/testdata/crl/8828a7e6.r0 b/security/advancedtls/testdata/crl/8828a7e6.r0 new file mode 120000 index 000000000000..70bead214cc3 --- /dev/null +++ b/security/advancedtls/testdata/crl/8828a7e6.r0 @@ -0,0 +1 @@ +6.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/README.md b/security/advancedtls/testdata/crl/README.md new file mode 100644 index 000000000000..00cb09c31928 --- /dev/null +++ b/security/advancedtls/testdata/crl/README.md @@ -0,0 +1,48 @@ +# CRL Test Data + +This directory contains cert chains and CRL files for revocation testing. + +To print the chain, use a command like, + +```shell +openssl crl2pkcs7 -nocrl -certfile security/crl/x509/client/testdata/revokedLeaf.pem | openssl pkcs7 -print_certs -text -noout +``` + +The crl file symlinks are generated with `openssl rehash` + +## unrevoked.pem + +A certificate chain with CRL files and unrevoked certs + +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=Root CA (2021-02-02T07:30:36-08:00) + * 1.crl + +NOTE: 1.crl file is symlinked with 5.crl to simulate two issuers that hash to +the same value to test that loading multiple files works. 
+ +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=node CA (2021-02-02T07:30:36-08:00) + * 2.crl + +## revokedInt.pem + +Certificate chain where the intermediate is revoked + +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=Root CA (2021-02-02T07:31:54-08:00) + * 3.crl +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=node CA (2021-02-02T07:31:54-08:00) + * 4.crl + +## revokedLeaf.pem + +Certificate chain where the leaf is revoked + +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=Root CA (2021-02-02T07:32:57-08:00) + * 5.crl +* Subject: C=US, ST=California, L=Mountain View, O=Google LLC, OU=Production, + OU=campus-sln, CN=node CA (2021-02-02T07:32:57-08:00) + * 6.crl diff --git a/security/advancedtls/testdata/crl/deee447d.r0 b/security/advancedtls/testdata/crl/deee447d.r0 new file mode 120000 index 000000000000..1a84eabdfc72 --- /dev/null +++ b/security/advancedtls/testdata/crl/deee447d.r0 @@ -0,0 +1 @@ +5.crl \ No newline at end of file diff --git a/security/advancedtls/testdata/crl/revokedInt.pem b/security/advancedtls/testdata/crl/revokedInt.pem new file mode 100644 index 000000000000..8b7282ff8221 --- /dev/null +++ b/security/advancedtls/testdata/crl/revokedInt.pem @@ -0,0 +1,58 @@ +-----BEGIN CERTIFICATE----- +MIIDAzCCAqmgAwIBAgITAWjKwm2dNQvkO62Jgyr5rAvVQzAKBggqhkjOPQQDAjCB +pTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDU1v +dW50YWluIFZpZXcxEzARBgNVBAoTCkdvb2dsZSBMTEMxJjARBgNVBAsTClByb2R1 +Y3Rpb24wEQYDVQQLEwpjYW1wdXMtc2xuMSwwKgYDVQQDEyNSb290IENBICgyMDIx +LTAyLTAyVDA3OjMxOjU0LTA4OjAwKTAgFw0yMTAyMDIxNTMxNTRaGA85OTk5MTIz +MTIzNTk1OVowgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYw +FAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYD +VQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9v 
+dCBDQSAoMjAyMS0wMi0wMlQwNzozMTo1NC0wODowMCkwWTATBgcqhkjOPQIBBggq +hkjOPQMBBwNCAAQhA0/puhTtSxbVVHseVhL2z7QhpPyJs5Q4beKi7tpaYRDmVn6p +Phh+jbRzg8Qj4gKI/Q1rrdm4rKer63LHpdWdo4GzMIGwMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUeq/TQ959KbWk/um08jSTXogXpWUwHwYDVR0jBBgwFoAUeq/T +Q959KbWk/um08jSTXogXpWUwLgYDVR0RBCcwJYYjc3BpZmZlOi8vY2FtcHVzLXNs +bi5wcm9kLmdvb2dsZS5jb20wCgYIKoZIzj0EAwIDSAAwRQIgOSQZvyDPQwVOWnpF +zWvI+DS2yXIj/2T2EOvJz2XgcK4CIQCL0mh/+DxLiO4zzbInKr0mxpGSxSeZCUk7 +1ZF7AeLlbw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDizCCAzKgAwIBAgIUAK6BGFvOeQUak65aL+XAQhr5LrcwCgYIKoZIzj0EAwIw +gaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1N +b3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYDVQQLEwpQcm9k +dWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAy +MS0wMi0wMlQwNzozMTo1NC0wODowMCkwIBcNMjEwMjAyMTUzMTU0WhgPOTk5OTEy +MzEyMzU5NTlaMIGlMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEW +MBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UEChMKR29vZ2xlIExMQzEmMBEG +A1UECxMKUHJvZHVjdGlvbjARBgNVBAsTCmNhbXB1cy1zbG4xLDAqBgNVBAMTI25v +ZGUgQ0EgKDIwMjEtMDItMDJUMDc6MzE6NTQtMDg6MDApMFkwEwYHKoZIzj0CAQYI +KoZIzj0DAQcDQgAEye6UOlBos8Q3FFBiLahD9BaLTA18bO4MTPyv35T3lppvxD5X +U/AnEllOnx5OMtMjMBbIQjSkMbiQ9xNXoSqB6aOCATowggE2MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUhWfy0gWBmkh2GiaBgnZzlQsvOlIwHwYDVR0jBBgwFoAU +eq/TQ959KbWk/um08jSTXogXpWUwMwYDVR0RBCwwKoYoc3BpZmZlOi8vbm9kZS5j +YW1wdXMtc2xuLnByb2QuZ29vZ2xlLmNvbTA7BgNVHR4BAf8EMTAvoC0wK4YpY3Nj +cy10ZWFtLm5vZGUuY2FtcHVzLXNsbi5wcm9kLmdvb2dsZS5jb20wQgYDVR0fBDsw +OTA3oDWgM4YxaHR0cDovL3N0YXRpYy5jb3JwLmdvb2dsZS5jb20vY3JsL2NhbXB1 +cy1zbG4vbm9kZTAKBggqhkjOPQQDAgNHADBEAiA79rPu6ZO1/0qB6RxL7jVz1200 +UTo8ioB4itbTzMnJqAIgJqp/Rc8OhpsfzQX8XnIIkl+SewT+tOxJT1MHVNMlVhc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC0DCCAnWgAwIBAgITXQ2c/C27OGqk4Pbu+MNJlOtpYTAKBggqhkjOPQQDAjCB 
+pTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDU1v +dW50YWluIFZpZXcxEzARBgNVBAoTCkdvb2dsZSBMTEMxJjARBgNVBAsTClByb2R1 +Y3Rpb24wEQYDVQQLEwpjYW1wdXMtc2xuMSwwKgYDVQQDEyNub2RlIENBICgyMDIx +LTAyLTAyVDA3OjMxOjU0LTA4OjAwKTAgFw0yMTAyMDIxNTMxNTRaGA85OTk5MTIz +MTIzNTk1OVowADBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABN2/1le5d3hS/piw +hrNMHjd7gPEjzXwtuXQTzdV+aaeOf3ldnC6OnEF/bggym9MldQSJZLXPYSaoj430 +Vu5PRNejggEkMIIBIDAOBgNVHQ8BAf8EBAMCB4AwHQYDVR0lBBYwFAYIKwYBBQUH +AwIGCCsGAQUFBwMBMB0GA1UdDgQWBBTEewP3JgrJPekWWGGjChVqaMhaqTAfBgNV +HSMEGDAWgBSFZ/LSBYGaSHYaJoGCdnOVCy86UjBrBgNVHREBAf8EYTBfghZqemFi +MTIucHJvZC5nb29nbGUuY29thkVzcGlmZmU6Ly9jc2NzLXRlYW0ubm9kZS5jYW1w +dXMtc2xuLnByb2QuZ29vZ2xlLmNvbS9yb2xlL2JvcmctYWRtaW4tY28wQgYDVR0f +BDswOTA3oDWgM4YxaHR0cDovL3N0YXRpYy5jb3JwLmdvb2dsZS5jb20vY3JsL2Nh +bXB1cy1zbG4vbm9kZTAKBggqhkjOPQQDAgNJADBGAiEA9w4qp3nHpXo+6d7mZc69 +QoALfP5ynfBCArt8bAlToo8CIQCgc/lTfl2BtBko+7h/w6pKxLeuoQkvCL5gHFyK +LXE6vA== +-----END CERTIFICATE----- diff --git a/security/advancedtls/testdata/crl/revokedLeaf.pem b/security/advancedtls/testdata/crl/revokedLeaf.pem new file mode 100644 index 000000000000..b7541abf6214 --- /dev/null +++ b/security/advancedtls/testdata/crl/revokedLeaf.pem @@ -0,0 +1,59 @@ +-----BEGIN CERTIFICATE----- +MIIDAzCCAqmgAwIBAgITTwodm6C4ZabFVUVa5yBw0TbzJTAKBggqhkjOPQQDAjCB +pTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDU1v +dW50YWluIFZpZXcxEzARBgNVBAoTCkdvb2dsZSBMTEMxJjARBgNVBAsTClByb2R1 +Y3Rpb24wEQYDVQQLEwpjYW1wdXMtc2xuMSwwKgYDVQQDEyNSb290IENBICgyMDIx +LTAyLTAyVDA3OjMyOjU3LTA4OjAwKTAgFw0yMTAyMDIxNTMyNTdaGA85OTk5MTIz +MTIzNTk1OVowgaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYw +FAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYD +VQQLEwpQcm9kdWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9v +dCBDQSAoMjAyMS0wMi0wMlQwNzozMjo1Ny0wODowMCkwWTATBgcqhkjOPQIBBggq +hkjOPQMBBwNCAARoZnzQWvAoyhvCLA2cFIK17khSaA9aA+flS5X9fLRt4RsfPCx3 +kim7wYKQSmBhQdc1UM4h3969r1c1Fvsh2H9qo4GzMIGwMA4GA1UdDwEB/wQEAwIB 
+BjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU36DFMC1JOVvlCq+tunitm2xo3OowHwYDVR0jBBgwFoAU36DF +MC1JOVvlCq+tunitm2xo3OowLgYDVR0RBCcwJYYjc3BpZmZlOi8vY2FtcHVzLXNs +bi5wcm9kLmdvb2dsZS5jb20wCgYIKoZIzj0EAwIDSAAwRQIgN7S9dQOQzNih92ag +7c5uQxuz+M6wnxWj/uwGQIIghRUCIQD2UDH6kkRSYQuyP0oN7XYO3XFjmZ2Yer6m +1ZS8fyWYYA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDjTCCAzKgAwIBAgIUAOmArBu9gihLTlqP3W7Et0UoocEwCgYIKoZIzj0EAwIw +gaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1N +b3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYDVQQLEwpQcm9k +dWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAy +MS0wMi0wMlQwNzozMjo1Ny0wODowMCkwIBcNMjEwMjAyMTUzMjU3WhgPOTk5OTEy +MzEyMzU5NTlaMIGlMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEW +MBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UEChMKR29vZ2xlIExMQzEmMBEG +A1UECxMKUHJvZHVjdGlvbjARBgNVBAsTCmNhbXB1cy1zbG4xLDAqBgNVBAMTI25v +ZGUgQ0EgKDIwMjEtMDItMDJUMDc6MzI6NTctMDg6MDApMFkwEwYHKoZIzj0CAQYI +KoZIzj0DAQcDQgAEfrgVEVQfSEFeCF1/FGeW7oq0yxecenT1BESfj4Z0zJ8p7P9W +bj1o6Rn6dUNlEhGrx7E3/4NFJ0cL1BSNGHkjiqOCATowggE2MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUpZ30UJXB4lI9j2SzodCtRFckrRcwHwYDVR0jBBgwFoAU +36DFMC1JOVvlCq+tunitm2xo3OowMwYDVR0RBCwwKoYoc3BpZmZlOi8vbm9kZS5j +YW1wdXMtc2xuLnByb2QuZ29vZ2xlLmNvbTA7BgNVHR4BAf8EMTAvoC0wK4YpY3Nj +cy10ZWFtLm5vZGUuY2FtcHVzLXNsbi5wcm9kLmdvb2dsZS5jb20wQgYDVR0fBDsw +OTA3oDWgM4YxaHR0cDovL3N0YXRpYy5jb3JwLmdvb2dsZS5jb20vY3JsL2NhbXB1 +cy1zbG4vbm9kZTAKBggqhkjOPQQDAgNJADBGAiEAnuONgMqmbBlj4ibw5BgDtZUM +pboACSFJtEOJu4Yqjt0CIQDI5193J4wUcAY0BK0vO9rRfbNOIc+4ke9ieBDPSuhm +mA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICzzCCAnagAwIBAgIUAMUnv6Rpsr8ze5sZuemSU2FF2LowCgYIKoZIzj0EAwIw +gaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1N +b3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYDVQQLEwpQcm9k +dWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjbm9kZSBDQSAoMjAy 
+MS0wMi0wMlQwNzozMjo1Ny0wODowMCkwIBcNMjEwMjAyMTUzMjU3WhgPOTk5OTEy +MzEyMzU5NTlaMAAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASCmYiIHUux5WFz +S0ksJzAPL7YTEh5o5MdXgLPB/WM6x9sVsQDSYU0PF5qc9vPNhkQzGBW79dkBnxhW +AGJkFr1Po4IBJDCCASAwDgYDVR0PAQH/BAQDAgeAMB0GA1UdJQQWMBQGCCsGAQUF +BwMCBggrBgEFBQcDATAdBgNVHQ4EFgQUCR1CGEdlks0qcxCExO0rP1B/Z7UwHwYD +VR0jBBgwFoAUpZ30UJXB4lI9j2SzodCtRFckrRcwawYDVR0RAQH/BGEwX4IWanph +YjEyLnByb2QuZ29vZ2xlLmNvbYZFc3BpZmZlOi8vY3Njcy10ZWFtLm5vZGUuY2Ft +cHVzLXNsbi5wcm9kLmdvb2dsZS5jb20vcm9sZS9ib3JnLWFkbWluLWNvMEIGA1Ud +HwQ7MDkwN6A1oDOGMWh0dHA6Ly9zdGF0aWMuY29ycC5nb29nbGUuY29tL2NybC9j +YW1wdXMtc2xuL25vZGUwCgYIKoZIzj0EAwIDRwAwRAIgK9vQYNoL8HlEwWv89ioG +aQ1+8swq6Bo/5mJBrdVLvY8CIGxo6M9vJkPdObmetWNC+lmKuZDoqJWI0AAmBT2J +mR2r +-----END CERTIFICATE----- diff --git a/security/advancedtls/testdata/crl/unrevoked.pem b/security/advancedtls/testdata/crl/unrevoked.pem new file mode 100644 index 000000000000..5c5fc58a7a5e --- /dev/null +++ b/security/advancedtls/testdata/crl/unrevoked.pem @@ -0,0 +1,58 @@ +-----BEGIN CERTIFICATE----- +MIIDBDCCAqqgAwIBAgIUALy864QhnkTdceLH52k2XVOe8IQwCgYIKoZIzj0EAwIw +gaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1N +b3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYDVQQLEwpQcm9k +dWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAy +MS0wMi0wMlQwNzozMDozNi0wODowMCkwIBcNMjEwMjAyMTUzMDM2WhgPOTk5OTEy +MzEyMzU5NTlaMIGlMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEW +MBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UEChMKR29vZ2xlIExMQzEmMBEG +A1UECxMKUHJvZHVjdGlvbjARBgNVBAsTCmNhbXB1cy1zbG4xLDAqBgNVBAMTI1Jv +b3QgQ0EgKDIwMjEtMDItMDJUMDc6MzA6MzYtMDg6MDApMFkwEwYHKoZIzj0CAQYI +KoZIzj0DAQcDQgAEYv/JS5hQ5kIgdKqYZWTKCO/6gloHAmIb1G8lmY0oXLXYNHQ4 +qHN7/pPtlcHQp0WK/hM8IGvgOUDoynA8mj0H9KOBszCBsDAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFPQNtnCIBcG4ReQgoVi0kPgTROseMB8GA1UdIwQYMBaAFPQN +tnCIBcG4ReQgoVi0kPgTROseMC4GA1UdEQQnMCWGI3NwaWZmZTovL2NhbXB1cy1z 
+bG4ucHJvZC5nb29nbGUuY29tMAoGCCqGSM49BAMCA0gAMEUCIQDwBn20DB4X/7Uk +Q5BR8JxQYUPxOfvuedjfeA8bPvQ2FwIgOEWa0cXJs1JxarILJeCXtdXvBgu6LEGQ +3Pk/bgz8Gek= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDizCCAzKgAwIBAgIUAM/6RKQ7Vke0i4xp5LaAqV73cmIwCgYIKoZIzj0EAwIw +gaUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1N +b3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgTExDMSYwEQYDVQQLEwpQcm9k +dWN0aW9uMBEGA1UECxMKY2FtcHVzLXNsbjEsMCoGA1UEAxMjUm9vdCBDQSAoMjAy +MS0wMi0wMlQwNzozMDozNi0wODowMCkwIBcNMjEwMjAyMTUzMDM2WhgPOTk5OTEy +MzEyMzU5NTlaMIGlMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEW +MBQGA1UEBxMNTW91bnRhaW4gVmlldzETMBEGA1UEChMKR29vZ2xlIExMQzEmMBEG +A1UECxMKUHJvZHVjdGlvbjARBgNVBAsTCmNhbXB1cy1zbG4xLDAqBgNVBAMTI25v +ZGUgQ0EgKDIwMjEtMDItMDJUMDc6MzA6MzYtMDg6MDApMFkwEwYHKoZIzj0CAQYI +KoZIzj0DAQcDQgAEllnhxmMYiUPUgRGmenbnm10gXpM94zHx3D1/HumPs6arjYuT +Zlhx81XL+g4bu4HII2qcGdP+Hqj/MMFNDI9z4aOCATowggE2MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUGOhXkmeT+CnWTt+ZbCS9OT9wX8gwHwYDVR0jBBgwFoAU +9A22cIgFwbhF5CChWLSQ+BNE6x4wMwYDVR0RBCwwKoYoc3BpZmZlOi8vbm9kZS5j +YW1wdXMtc2xuLnByb2QuZ29vZ2xlLmNvbTA7BgNVHR4BAf8EMTAvoC0wK4YpY3Nj +cy10ZWFtLm5vZGUuY2FtcHVzLXNsbi5wcm9kLmdvb2dsZS5jb20wQgYDVR0fBDsw +OTA3oDWgM4YxaHR0cDovL3N0YXRpYy5jb3JwLmdvb2dsZS5jb20vY3JsL2NhbXB1 +cy1zbG4vbm9kZTAKBggqhkjOPQQDAgNHADBEAiA86egqPw0qyapAeMGbHxrmYZYa +i5ARQsSKRmQixgYizQIgW+2iRWN6Kbqt4WcwpmGv/xDckdRXakF5Ign/WUDO5u4= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICzzCCAnWgAwIBAgITYjjKfYZUKQNUjNyF+hLDGpHJKTAKBggqhkjOPQQDAjCB +pTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDU1v +dW50YWluIFZpZXcxEzARBgNVBAoTCkdvb2dsZSBMTEMxJjARBgNVBAsTClByb2R1 +Y3Rpb24wEQYDVQQLEwpjYW1wdXMtc2xuMSwwKgYDVQQDEyNub2RlIENBICgyMDIx +LTAyLTAyVDA3OjMwOjM2LTA4OjAwKTAgFw0yMTAyMDIxNTMwMzZaGA85OTk5MTIz +MTIzNTk1OVowADBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD4r4+nCgZExYF8v +CLvGn0lY/cmam8mAkJDXRN2Ja2t+JwaTOptPmbbXft+1NTk5gCg5wB+FJCnaV3I/ 
+HaxEhBWjggEkMIIBIDAOBgNVHQ8BAf8EBAMCB4AwHQYDVR0lBBYwFAYIKwYBBQUH +AwIGCCsGAQUFBwMBMB0GA1UdDgQWBBTTCjXX1Txjc00tBg/5cFzpeCSKuDAfBgNV +HSMEGDAWgBQY6FeSZ5P4KdZO35lsJL05P3BfyDBrBgNVHREBAf8EYTBfghZqemFi +MTIucHJvZC5nb29nbGUuY29thkVzcGlmZmU6Ly9jc2NzLXRlYW0ubm9kZS5jYW1w +dXMtc2xuLnByb2QuZ29vZ2xlLmNvbS9yb2xlL2JvcmctYWRtaW4tY28wQgYDVR0f +BDswOTA3oDWgM4YxaHR0cDovL3N0YXRpYy5jb3JwLmdvb2dsZS5jb20vY3JsL2Nh +bXB1cy1zbG4vbm9kZTAKBggqhkjOPQQDAgNIADBFAiBq3URViNyMLpvzZHC1Y+4L ++35guyIJfjHu08P3S8/xswIhAJtWSQ1ZtozdOzGxg7GfUo4hR+5SP6rBTgIqXEfq +48fW +-----END CERTIFICATE----- diff --git a/security/authorization/engine/engine.go b/security/authorization/engine/engine.go index 596382e49b8d..970c560af722 100644 --- a/security/authorization/engine/engine.go +++ b/security/authorization/engine/engine.go @@ -253,13 +253,16 @@ func getDecision(engine *policyEngine, match bool) Decision { return DecisionDeny } -// Returns the authorization decision of a single policy engine based on activation. -// If any policy matches, the decision matches the engine's action, and the first -// matching policy name will be returned. -// Else if any policy is missing attributes, the decision is unknown, and the list of -// policy names that can't be evaluated due to missing attributes will be returned. -// Else, the decision is the opposite of the engine's action, i.e. an ALLOW engine -// will return DecisionDeny, and vice versa. +// Returns the authorization decision of a single policy engine based on +// activation. If any policy matches, the decision matches the engine's +// action, and the first matching policy name will be returned. +// +// Else if any policy is missing attributes, the decision is unknown, and the +// list of policy names that can't be evaluated due to missing attributes will +// be returned. +// +// Else, the decision is the opposite of the engine's action, i.e. an ALLOW +// engine will return DecisionDeny, and vice versa. 
func (engine *policyEngine) evaluate(activation interpreter.Activation) (Decision, []string) { unknownPolicyNames := []string{} for policyName, program := range engine.programs { diff --git a/security/authorization/engine/engine_test.go b/security/authorization/engine/engine_test.go index c159c4bd5c21..1fcff698aa2b 100644 --- a/security/authorization/engine/engine_test.go +++ b/security/authorization/engine/engine_test.go @@ -17,6 +17,7 @@ package engine import ( + "context" "reflect" "sort" "testing" @@ -48,6 +49,10 @@ func (fake fakeProgram) Eval(vars interface{}) (ref.Val, *cel.EvalDetails, error return fake.out, nil, fake.err } +func (fake fakeProgram) ContextEval(ctx context.Context, vars interface{}) (ref.Val, *cel.EvalDetails, error) { + return fake.Eval(vars) +} + type valMock struct { val interface{} } diff --git a/security/authorization/go.mod b/security/authorization/go.mod index 0581b3401f32..68745b91c164 100644 --- a/security/authorization/go.mod +++ b/security/authorization/go.mod @@ -1,12 +1,23 @@ module google.golang.org/grpc/security/authorization -go 1.12 +go 1.17 require ( - github.com/envoyproxy/go-control-plane v0.9.5 - github.com/google/cel-go v0.5.1 - github.com/google/go-cmp v0.5.0 - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 - google.golang.org/grpc v1.31.0 - google.golang.org/protobuf v1.25.0 + github.com/envoyproxy/go-control-plane v0.11.0 + github.com/google/cel-go v0.14.0 + github.com/google/go-cmp v0.5.9 + google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 + google.golang.org/grpc v1.54.0 + google.golang.org/protobuf v1.30.0 +) + +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 // indirect + github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect + github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect + 
github.com/golang/protobuf v1.5.3 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + golang.org/x/exp v0.0.0-20230418202329-0354be287a23 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect ) diff --git a/security/authorization/go.sum b/security/authorization/go.sum index a953711e01e6..4147bd7420a4 100644 --- a/security/authorization/go.sum +++ b/security/authorization/go.sum @@ -1,104 +1,92 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f h1:0cEys61Sr2hUBEXfNV8eyQP01oZuBgoMeHunebPirK8= -github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 h1:X8MJ0fnN5FPdcGF5Ij2/OW+HgiJrRg3AfHAx1PJtIzM= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533 h1:8wZizuKuZVu5COB7EsBYxBQz8nRcXXn5d4Gt91eJLvU= -github.com/cncf/udpa/go v0.0.0-20200313221541-5f7e5dd04533/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4= 
+github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.5 h1:lRJIqDD8yjV1YyPRqecMdytjDLs2fTXq363aCib5xPU= -github.com/envoyproxy/go-control-plane v0.9.5/go.mod h1:OXl5to++W0ctG+EHWTFUjiypVxC/Y4VLc/KFU+al13s= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/google/cel-go v0.5.1 h1:oDsbtAwlwFPEcC8dMoRWNuVzWJUDeDZeHjoet9rXjTs= -github.com/google/cel-go v0.5.1/go.mod h1:9SvtVVTtZV4DTB1/RuAD1D2HhuqEIdmZEE/r/lrFyKE= -github.com/google/cel-spec v0.4.0/go.mod h1:2pBM5cU4UKjbPDXBgwWkiwBsVgnxknuEJ7C5TDWwORQ= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 
h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/cel-go v0.14.0 h1:LFobwuUDslWUHdQ48SXVXvQgPH2X1XVhsgOGNioAEZ4= +github.com/google/cel-go v0.14.0/go.mod h1:YzWEoI07MC/a/wj9in8GeVatqfypkldgBlwXh9bCwqY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= +github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23 h1:4NKENAGIctmZYLK9W+X1kDK8ObBFqOSCJM6WE7CvkJY= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a 
h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod 
h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/server.go b/server.go index 7a2aa28a1147..e076ec7143bb 100644 --- a/server.go +++ b/server.go @@ -43,8 +43,8 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -57,12 +57,30 @@ import ( const ( defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. 
+ listenerAddressForServeHTTP = "listenerAddressForServeHTTP" ) func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + globalServerOptions = append(globalServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + globalServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption } var statusOK = status.New(codes.OK, "") @@ -107,9 +125,12 @@ type serverWorkerData struct { type Server struct { opts serverOptions - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[transport.ServerTransport]bool + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. 
+ conns map[string]map[transport.ServerTransport]bool serve bool drain bool cv *sync.Cond // signaled when connections close for GracefulStop @@ -121,10 +142,10 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData - serverWorkerChannels []chan *serverWorkerData + serverWorkerChannel chan *serverWorkerData } type serverOptions struct { @@ -136,8 +157,9 @@ type serverOptions struct { streamInt StreamServerInterceptor chainUnaryInts []UnaryServerInterceptor chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger inTapHandle tap.ServerInHandle - statsHandler stats.Handler + statsHandlers []stats.Handler maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int @@ -152,6 +174,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool } var defaultServerOptions = serverOptions{ @@ -160,7 +183,9 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } +var globalServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -170,7 +195,7 @@ type ServerOption interface { // EmptyServerOption does not alter the server configuration. It can be embedded // in another structure to build custom server options. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -194,10 +219,27 @@ func newFuncServerOption(f func(*serverOptions)) *funcServerOption { } } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. 
-// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -205,11 +247,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. 
func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -266,6 +307,35 @@ func CustomCodec(codec Codec) ServerOption { }) } +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. +// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + // RPCCompressor returns a ServerOption that sets a compressor for outbound // messages. For backward compatibility, all outbound messages will be sent // using this compressor, regardless of incoming message compression. By @@ -376,6 +446,11 @@ func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOptio // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
func InTapHandle(h tap.ServerInHandle) ServerOption { return newFuncServerOption(func(o *serverOptions) { if o.inTapHandle != nil { @@ -388,7 +463,21 @@ func InTapHandle(h tap.ServerInHandle) ServerOption { // StatsHandler returns a ServerOption that sets the stats handler for the server. func StatsHandler(h stats.Handler) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.statsHandler = h + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl }) } @@ -415,7 +504,7 @@ func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { // new connections. If this is not set, the default is 120 seconds. A zero or // negative value will result in an immediate timeout. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -436,7 +525,7 @@ func MaxHeaderListSize(s uint32) ServerOption { // HeaderTableSize returns a ServerOption that sets the size of dynamic // header table for stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -451,7 +540,7 @@ func HeaderTableSize(s uint32) ServerOption { // zero (default) will disable workers and spawn a new goroutine for each // stream. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -465,6 +554,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -473,53 +583,54 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { const serverWorkerResetThreshold = 1 << 16 // serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows different requests to be +// data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // // [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker(ch chan *serverWorkerData) { - // To make sure all server workers don't reset at the same time, choose a - // random number of iterations before resetting. 
- threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) - for completed := 0; completed < threshold; completed++ { - data, ok := <-ch +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + data, ok := <-s.serverWorkerChannel if !ok { return } - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) - data.wg.Done() + s.handleSingleStream(data) } - go s.serverWorker(ch) + go s.serverWorker() } -// initServerWorkers creates worker goroutines and channels to process incoming +func (s *Server) handleSingleStream(data *serverWorkerData) { + defer data.wg.Done() + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) +} + +// initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + s.serverWorkerChannel = make(chan *serverWorkerData) for i := uint32(0); i < s.opts.numServerWorkers; i++ { - s.serverWorkerChannels[i] = make(chan *serverWorkerData) - go s.serverWorker(s.serverWorkerChannels[i]) + go s.serverWorker() } } func (s *Server) stopServerWorkers() { - for i := uint32(0); i < s.opts.numServerWorkers; i++ { - close(s.serverWorkerChannels[i]) - } + close(s.serverWorkerChannel) } // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. 
func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions + for _, o := range globalServerOptions { + o.apply(&opts) + } for _, o := range opt { o.apply(&opts) } s := &Server{ lis: make(map[net.Listener]bool), opts: opts, - conns: make(map[transport.ServerTransport]bool), + conns: make(map[string]map[transport.ServerTransport]bool), services: make(map[string]*serviceInfo), quit: grpcsync.NewEvent(), done: grpcsync.NewEvent(), @@ -537,9 +648,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") return s } @@ -663,16 +773,9 @@ func (s *Server) GetServiceInfo() map[string]ServiceInfo { // the server being stopped. var ErrServerStopped = errors.New("grpc: the server has been stopped") -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -684,9 +787,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -719,11 +821,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -733,8 +830,16 @@ func (s 
*Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -778,7 +883,7 @@ func (s *Server) Serve(lis net.Listener) error { // s.conns before this conn can be added. s.serveWG.Add(1) go func() { - s.handleRawConn(rawConn) + s.handleRawConn(lis.Addr().String(), rawConn) s.serveWG.Done() }() } @@ -786,51 +891,47 @@ func (s *Server) Serve(lis net.Listener) error { // handleRawConn forks a goroutine to handle a just-accepted connection that // has not had any I/O performed on it yet. -func (s *Server) handleRawConn(rawConn net.Conn) { +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { if s.quit.HasFired() { rawConn.Close() return } rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - // ErrConnDispatched means that the connection was dispatched away from - // gRPC; those connections should be left open. 
- if err != credentials.ErrConnDispatched { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - channelz.Warningf(logger, s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - rawConn.Close() - } - rawConn.SetDeadline(time.Time{}) - return - } // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) if st == nil { return } - rawConn.SetDeadline(time.Time{}) - if !s.addConn(st) { + if !s.addConn(lisAddr, st) { return } go func() { s.serveStreams(st) - s.removeConn(st) + s.removeConn(lisAddr, st) }() } +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain("") + } + s.mu.Unlock() +} + // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). -func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { config := &transport.ServerConfig{ MaxStreams: s.opts.maxConcurrentStreams, - AuthInfo: authInfo, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, InTapHandle: s.opts.inTapHandle, - StatsHandler: s.opts.statsHandler, + StatsHandlers: s.opts.statsHandlers, KeepaliveParams: s.opts.keepaliveParams, KeepalivePolicy: s.opts.keepalivePolicy, InitialWindowSize: s.opts.initialWindowSize, @@ -841,13 +942,20 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } - st, err := transport.NewServerTransport("http2", c, config) + st, err := transport.NewServerTransport(c, config) if err != nil { s.mu.Lock() s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() - 
c.Close() - channelz.Warning(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. + if err != io.EOF { + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } return nil } @@ -855,29 +963,24 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup - var roundRobinCounter uint32 st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) if s.opts.numServerWorkers > 0 { data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + case s.serverWorkerChannel <- data: + return default: // If all stream workers are busy, fallback to the default code path. 
- go func() { - s.handleStream(st, stream, s.traceInfo(st, stream)) - wg.Done() - }() } - } else { - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() } + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -902,32 +1005,33 @@ var _ http.Handler = (*Server)(nil) // To share one port (such as 443 for https) between gRPC and an // existing http.Handler, use a root http.Handler such as: // -// if r.ProtoMajor == 2 && strings.HasPrefix( -// r.Header.Get("Content-Type"), "application/grpc") { -// grpcServer.ServeHTTP(w, r) -// } else { -// yourMux.ServeHTTP(w, r) -// } +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } // // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally // separate from grpc-go's HTTP/2 server. Performance and features may vary // between the two paths. ServeHTTP does not support some gRPC features // available through grpc-go's HTTP/2 server. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. 
return } - if !s.addConn(st) { + if !s.addConn(listenerAddressForServeHTTP, st) { return } - defer s.removeConn(st) + defer s.removeConn(listenerAddressForServeHTTP, st) s.serveStreams(st) } @@ -955,27 +1059,40 @@ func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Strea return trInfo } -func (s *Server) addConn(st transport.ServerTransport) bool { +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain() + st.Drain("") + } + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) } - s.conns[st] = true + s.conns[addr][st] = true return true } -func (s *Server) removeConn(st transport.ServerTransport) { +func (s *Server) removeConn(addr string, st transport.ServerTransport) { s.mu.Lock() defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, st) + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } s.cv.Broadcast() } } @@ -1019,8 +1136,10 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) - if err == nil && s.opts.statsHandler != nil { - s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } } return err } @@ -1040,36 +1159,40 @@ func chainUnaryServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) - } + chainedInt = chainUnaryInterceptors(interceptors) } s.opts.unaryInt = chainedInt } -// getChainUnaryHandler recursively generate the chained UnaryHandler +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { - sh := s.opts.statsHandler - if sh != nil || trInfo != nil || channelz.IsOn() { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { s.incrCallsStarted() } 
var statsBegin *stats.Begin - if sh != nil { + for _, sh := range shs { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, } sh.HandleRPC(stream.Context(), statsBegin) } @@ -1095,7 +1218,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.Finish() } - if sh != nil { + for _, sh := range shs { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1115,9 +1238,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } }() } - - binlog := binarylog.GetMethodLogger(stream.Method()) - if binlog != nil { + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ @@ -1137,7 +1267,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if peer, ok := peer.FromContext(ctx); ok { logEntry.PeerAddr = peer.Addr } - binlog.Log(logEntry) + for _, binlog := range binlogs { + binlog.Log(ctx, logEntry) + } } // comp and cp are used for compression. decomp and dc are used for @@ -1147,6 +1279,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor + var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -1167,23 +1300,29 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - stream.SetSendCompress(cp.Type()) + sendCompressorName = cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. comp = encoding.GetCompressor(rc) if comp != nil { - stream.SetSendCompress(rc) + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } var payInfo *payloadInfo - if sh != nil || binlog != nil { + if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } @@ -1194,19 +1333,23 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } - if sh != nil { + for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - WireLength: payInfo.wireLength + headerLen, - Data: d, - Length: len(d), + RecvTime: time.Now(), + Payload: v, + Length: len(d), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Data: d, }) } - if binlog != nil { - binlog.Log(&binarylog.ClientMessage{ + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: d, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), cm) + } } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) @@ -1218,9 +1361,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1229,18 +1373,24 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if e := t.WriteStatus(stream, appStatus); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } - if binlog != nil { + if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { // Only log serverHeader if there was header. Otherwise it can // be trailer only. 
- binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + } } - binlog.Log(&binarylog.ServerTrailer{ + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), st) + } } return appErr } @@ -1249,6 +1399,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } opts := &transport.Options{Last: true} + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). @@ -1266,26 +1421,34 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerTrailer{ + } + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), st) + } } return err } - if binlog != nil { + if len(binlogs) != 0 { h, _ := stream.Header() - binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) - binlog.Log(&binarylog.ServerMessage{ + } + sm := &binarylog.ServerMessage{ Message: reply, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), sm) + } } if channelz.IsOn() { t.IncrMsgSent() @@ -1296,14 +1459,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? - err = t.WriteStatus(stream, statusOK) - if binlog != nil { - binlog.Log(&binarylog.ServerTrailer{ + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, - }) + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), st) + } } - return err + return t.WriteStatus(stream, statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. 
@@ -1321,22 +1486,24 @@ func chainStreamServerInterceptors(s *Server) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) - } + chainedInt = chainStreamInterceptors(interceptors) } s.opts.streamInt = chainedInt } -// getChainStreamHandler recursively generate the chained StreamHandler +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { if curr == len(interceptors)-1 { return finalHandler } - - return func(srv interface{}, ss ServerStream) error { - return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1344,29 +1511,33 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if channelz.IsOn() { s.incrCallsStarted() } - sh := s.opts.statsHandler + shs := s.opts.statsHandlers var statsBegin *stats.Begin - if sh != nil { + if len(shs) != 0 { beginTime := time.Now() statsBegin = &stats.Begin{ - BeginTime: beginTime, + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) } - sh.HandleRPC(stream.Context(), statsBegin) } ctx := NewContextWithServerTransportStream(stream.Context(), stream) 
ss := &serverStream{ ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, - statsHandler: sh, + statsHandler: shs, } - if sh != nil || trInfo != nil || channelz.IsOn() { + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { // See comment in processUnaryRPC on defers. defer func() { if trInfo != nil { @@ -1380,7 +1551,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } - if sh != nil { + if len(shs) != 0 { end := &stats.End{ BeginTime: statsBegin.BeginTime, EndTime: time.Now(), @@ -1388,7 +1559,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } } if channelz.IsOn() { @@ -1401,8 +1574,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp }() } - ss.binlog = binarylog.GetMethodLogger(stream.Method()) - if ss.binlog != nil { + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1421,7 +1601,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if peer, ok := peer.FromContext(ss.Context()); ok { logEntry.PeerAddr = peer.Addr } - ss.binlog.Log(logEntry) + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), logEntry) + } } // If dc is set and matches the stream's compression, use it. 
Otherwise, try @@ -1443,15 +1625,23 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { ss.cp = s.opts.cp - stream.SetSendCompress(s.opts.cp.Type()) + ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { - stream.SetSendCompress(rc) + ss.sendCompressorName = rc + } + } + + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) } @@ -1473,7 +1663,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1482,13 +1674,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.SetError() ss.mu.Unlock() } - t.WriteStatus(ss.s, appStatus) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), st) + } } + t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? 
return appErr } @@ -1497,14 +1692,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - err = t.WriteStatus(ss.s, statusOK) - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ServerTrailer{ + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), st) + } } - return err + return t.WriteStatus(ss.s, statusOK) } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { @@ -1519,7 +1716,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() @@ -1578,7 +1775,7 @@ type streamKey struct{} // NewContextWithServerTransportStream creates a new context from ctx and // attaches stream to it. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1593,7 +1790,7 @@ func NewContextWithServerTransportStream(ctx context.Context, stream ServerTrans // // See also NewContextWithServerTransportStream. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1608,7 +1805,7 @@ type ServerTransportStream interface { // ctx. Returns nil if the given context has no stream associated with it // (which implies it is not an RPC invocation context). 
// -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1630,16 +1827,12 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis s.lis = nil - st := s.conns + conns := s.conns s.conns = nil // interrupt GracefulStop if Stop and GracefulStop are called concurrently. s.cv.Broadcast() @@ -1648,8 +1841,10 @@ func (s *Server) Stop() { for lis := range listeners { lis.Close() } - for c := range st { - c.Close() + for _, cs := range conns { + for st := range cs { + st.Close(errors.New("Server.Stop called")) + } } if s.opts.numServerWorkers > 0 { s.stopServerWorkers() @@ -1670,11 +1865,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() @@ -1686,8 +1877,10 @@ func (s *Server) GracefulStop() { } s.lis = nil if !s.drain { - for st := range s.conns { - st.Drain() + for _, conns := range s.conns { + for st := range conns { + st.Drain("graceful_stop") + } } s.drain = true } @@ -1725,12 +1918,26 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return codec } -// SetHeader sets the header metadata. -// When called multiple times, all the provided metadata will be merged. -// All the metadata will be sent out when one of the following happens: -// - grpc.SendHeader() is called; -// - The first response is sent out; -// - An RPC status is sent out (error or success). +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. 
+// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1742,8 +1949,14 @@ func SetHeader(ctx context.Context, md metadata.MD) error { return stream.SetHeader(md) } -// SendHeader sends header metadata. It may be called at most once. -// The provided md and headers set by SetHeader() will be sent. +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. 
func SendHeader(ctx context.Context, md metadata.MD) error { stream := ServerTransportStreamFromContext(ctx) if stream == nil { @@ -1755,8 +1968,66 @@ func SendHeader(ctx context.Context, md metadata.MD) error { return nil } +// SetSendCompressor sets a compressor for outbound messages from the server. +// It must not be called after any event that causes headers to be sent +// (see ServerStream.SetHeader for the complete list). Provided compressor is +// used when below conditions are met: +// +// - compressor is registered via encoding.RegisterCompressor +// - compressor name must exist in the client advertised compressor names +// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to +// get client supported compressor names. +// +// The context provided must be the context passed to the server's handler. +// It must be noted that compressor name encoding.Identity disables the +// outbound compression. +// By default, server messages will be sent using the same compressor with +// which request messages were sent. +// +// It is not safe to call SetSendCompressor concurrently with SendHeader and +// SendMsg. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func SetSendCompressor(ctx context.Context, name string) error { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return fmt.Errorf("failed to fetch the stream from the given context") + } + + if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { + return fmt.Errorf("unable to set send compressor: %w", err) + } + + return stream.SetSendCompress(name) +} + +// ClientSupportedCompressors returns compressor names advertised by the client +// via grpc-accept-encoding header. +// +// The context provided must be the context passed to the server's handler. 
+// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil +} + // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil @@ -1785,3 +2056,22 @@ type channelzServer struct { func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. 
+func validateSendCompressor(name, clientCompressors string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range strings.Split(clientCompressors, ",") { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} diff --git a/server_test.go b/server_test.go index fcfde30706c3..85a8f5bf72eb 100644 --- a/server_test.go +++ b/server_test.go @@ -22,17 +22,27 @@ import ( "context" "net" "reflect" + "strconv" "strings" "testing" "time" + "github.com/google/go-cmp/cmp" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" ) type emptyServiceServer interface{} type testServer struct{} +func errorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} + func (s) TestStopBeforeServe(t *testing.T) { lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -121,6 +131,34 @@ func (s) TestGetServiceInfo(t *testing.T) { } } +func (s) TestRetryChainedInterceptor(t *testing.T) { + var records []int + i1 := func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) { + records = append(records, 1) + // call handler twice to simulate a retry here. 
+ handler(ctx, req) + return handler(ctx, req) + } + i2 := func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) { + records = append(records, 2) + return handler(ctx, req) + } + i3 := func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) { + records = append(records, 3) + return handler(ctx, req) + } + + ii := chainUnaryInterceptors([]UnaryServerInterceptor{i1, i2, i3}) + + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, nil + } + ii(context.Background(), nil, nil, handler) + if !cmp.Equal(records, []int{1, 2, 3, 2, 3}) { + t.Fatalf("retry failed on chained interceptors: %v", records) + } +} + func (s) TestStreamContext(t *testing.T) { expectedStream := &transport.Stream{} ctx := NewContextWithServerTransportStream(context.Background(), expectedStream) @@ -130,3 +168,59 @@ func (s) TestStreamContext(t *testing.T) { t.Fatalf("GetStreamFromContext(%v) = %v, %t, want: %v, true", ctx, stream, ok, expectedStream) } } + +func BenchmarkChainUnaryInterceptor(b *testing.B) { + for _, n := range []int{1, 3, 5, 10} { + n := n + b.Run(strconv.Itoa(n), func(b *testing.B) { + interceptors := make([]UnaryServerInterceptor, 0, n) + for i := 0; i < n; i++ { + interceptors = append(interceptors, func( + ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler, + ) (interface{}, error) { + return handler(ctx, req) + }) + } + + s := NewServer(ChainUnaryInterceptor(interceptors...)) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err := s.opts.unaryInt(context.Background(), nil, nil, + func(ctx context.Context, req interface{}) (interface{}, error) { + return nil, nil + }, + ); err != nil { + b.Fatal(err) + } + } + }) + } +} + +func BenchmarkChainStreamInterceptor(b *testing.B) { + for _, n := range []int{1, 3, 5, 10} { + n := n + b.Run(strconv.Itoa(n), func(b 
*testing.B) { + interceptors := make([]StreamServerInterceptor, 0, n) + for i := 0; i < n; i++ { + interceptors = append(interceptors, func( + srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler, + ) error { + return handler(srv, ss) + }) + } + + s := NewServer(ChainStreamInterceptor(interceptors...)) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := s.opts.streamInt(nil, nil, nil, func(srv interface{}, stream ServerStream) error { + return nil + }); err != nil { + b.Fatal(err) + } + } + }) + } +} diff --git a/service_config.go b/service_config.go index 22c4240cf7e8..0df11fc09882 100644 --- a/service_config.go +++ b/service_config.go @@ -23,8 +23,6 @@ import ( "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -57,10 +55,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancerName will override this. This is deprecated; - // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig - // will be used. + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, + // lbConfig will be used. LB *string // lbConfig is the service config's load balancing configuration. 
If @@ -107,8 +104,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -130,50 +127,6 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. - hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -202,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -218,7 +171,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + 
internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -227,7 +180,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -253,18 +206,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -284,13 +232,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} @@ 
-313,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -333,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: *ib, - MaxBackoff: *mb, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } @@ -381,6 +321,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. 
func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/service_config_test.go b/service_config_test.go index b3c6988e8d97..90ed40a68021 100644 --- a/service_config_test.go +++ b/service_config_test.go @@ -20,8 +20,6 @@ package grpc import ( "encoding/json" - "fmt" - "math" "reflect" "testing" "time" @@ -449,55 +447,6 @@ func (s) TestParseMethodConfigDuplicatedName(t *testing.T) { }) } -func (s) TestParseDuration(t *testing.T) { - testCases := []struct { - s *string - want *time.Duration - err bool - }{ - {s: nil, want: nil}, - {s: newString("1s"), want: newDuration(time.Second)}, - {s: newString("-1s"), want: newDuration(-time.Second)}, - {s: newString("1.1s"), want: newDuration(1100 * time.Millisecond)}, - {s: newString("1.s"), want: newDuration(time.Second)}, - {s: newString("1.0s"), want: newDuration(time.Second)}, - {s: newString(".002s"), want: newDuration(2 * time.Millisecond)}, - {s: newString(".002000s"), want: newDuration(2 * time.Millisecond)}, - {s: newString("0.003s"), want: newDuration(3 * time.Millisecond)}, - {s: newString("0.000004s"), want: newDuration(4 * time.Microsecond)}, - {s: newString("5000.000000009s"), want: newDuration(5000*time.Second + 9*time.Nanosecond)}, - {s: newString("4999.999999999s"), want: newDuration(5000*time.Second - time.Nanosecond)}, - {s: newString("1"), err: true}, - {s: newString("s"), err: true}, - {s: newString(".s"), err: true}, - {s: newString("1 s"), err: true}, - {s: newString(" 1s"), err: true}, - {s: newString("1ms"), err: true}, - {s: newString("1.1.1s"), err: true}, - {s: newString("Xs"), err: true}, - {s: newString("as"), err: true}, - {s: newString(".0000000001s"), err: true}, - {s: newString(fmt.Sprint(math.MaxInt32) + "s"), want: newDuration(math.MaxInt32 * time.Second)}, - {s: newString(fmt.Sprint(int64(math.MaxInt32)+1) + "s"), err: true}, - } - for _, tc := range testCases { - got, err := 
parseDuration(tc.s) - if tc.err != (err != nil) || - (got == nil) != (tc.want == nil) || - (got != nil && *got != *tc.want) { - wantErr := "" - if tc.err { - wantErr = "" - } - s := "" - if tc.s != nil { - s = `&"` + *tc.s + `"` - } - t.Errorf("parseDuration(%v) = %v, %v; want %v, %v", s, got, err, tc.want, wantErr) - } - } -} - func newBool(b bool) *bool { return &b } diff --git a/serviceconfig/serviceconfig.go b/serviceconfig/serviceconfig.go index 73a2f926613e..35e7a20a04ba 100644 --- a/serviceconfig/serviceconfig.go +++ b/serviceconfig/serviceconfig.go @@ -19,7 +19,7 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/shared_buffer_pool.go b/shared_buffer_pool.go new file mode 100644 index 000000000000..c3a5a9ac1f19 --- /dev/null +++ b/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. 
+type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(interface{}) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() interface{} { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. 
+type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/shared_buffer_pool_test.go b/shared_buffer_pool_test.go new file mode 100644 index 000000000000..f5ed7c8314f1 --- /dev/null +++ b/shared_buffer_pool_test.go @@ -0,0 +1,48 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "testing" + +func (s) TestSharedBufferPool(t *testing.T) { + pools := []SharedBufferPool{ + nopBufferPool{}, + NewSharedBufferPool(), + } + + lengths := []int{ + level4PoolMaxSize + 1, + level4PoolMaxSize, + level3PoolMaxSize, + level2PoolMaxSize, + level1PoolMaxSize, + level0PoolMaxSize, + } + + for _, p := range pools { + for _, l := range lengths { + bs := p.Get(l) + if len(bs) != l { + t.Fatalf("Expected buffer of length %d, got %d", l, len(bs)) + } + + p.Put(&bs) + } + } +} diff --git a/stats/opencensus/client_metrics.go b/stats/opencensus/client_metrics.go new file mode 100644 index 000000000000..4d45845f8c56 --- /dev/null +++ b/stats/opencensus/client_metrics.go @@ -0,0 +1,150 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opencensus + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +var ( + keyClientMethod = tag.MustNewKey("grpc_client_method") + keyClientStatus = tag.MustNewKey("grpc_client_status") +) + +// Measures, which are recorded by client stats handler: Note that due to the +// nature of how stats handlers are called on gRPC's client side, the per rpc +// unit is actually per attempt throughout this definition file. +var ( + clientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + clientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) + clientSentCompressedBytesPerRPC = stats.Int64("grpc.io/client/sent_compressed_message_bytes_per_rpc", "Total compressed bytes sent across all request messages per RPC.", stats.UnitBytes) + clientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) + clientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) + clientReceivedCompressedBytesPerRPC = stats.Int64("grpc.io/client/received_compressed_message_bytes_per_rpc", "Total compressed bytes received across all response messages per RPC.", stats.UnitBytes) + 
clientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + clientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "The total number of client RPCs ever opened, including those that have not completed.", stats.UnitDimensionless) + clientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) + // Per call measure: + clientAPILatency = stats.Float64("grpc.io/client/api_latency", "The end-to-end time the gRPC library takes to complete an RPC from the application’s perspective", stats.UnitMilliseconds) +) + +var ( + // ClientSentMessagesPerRPCView is the distribution of sent messages per + // RPC, keyed on method. + ClientSentMessagesPerRPCView = &view.View{ + Measure: clientSentMessagesPerRPC, + Name: "grpc.io/client/sent_messages_per_rpc", + Description: "Distribution of sent messages per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: countDistribution, + } + // ClientReceivedMessagesPerRPCView is the distribution of received messages + // per RPC, keyed on method. + ClientReceivedMessagesPerRPCView = &view.View{ + Measure: clientReceivedMessagesPerRPC, + Name: "grpc.io/client/received_messages_per_rpc", + Description: "Distribution of received messages per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: countDistribution, + } + // ClientSentBytesPerRPCView is the distribution of sent bytes per RPC, + // keyed on method. 
+ ClientSentBytesPerRPCView = &view.View{ + Measure: clientSentBytesPerRPC, + Name: "grpc.io/client/sent_bytes_per_rpc", + Description: "Distribution of sent bytes per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: bytesDistribution, + } + // ClientSentCompressedMessageBytesPerRPCView is the distribution of + // compressed sent message bytes per RPC, keyed on method. + ClientSentCompressedMessageBytesPerRPCView = &view.View{ + Measure: clientSentCompressedBytesPerRPC, + Name: "grpc.io/client/sent_compressed_message_bytes_per_rpc", + Description: "Distribution of sent compressed message bytes per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: bytesDistribution, + } + // ClientReceivedBytesPerRPCView is the distribution of received bytes per + // RPC, keyed on method. + ClientReceivedBytesPerRPCView = &view.View{ + Measure: clientReceivedBytesPerRPC, + Name: "grpc.io/client/received_bytes_per_rpc", + Description: "Distribution of received bytes per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: bytesDistribution, + } + // ClientReceivedCompressedMessageBytesPerRPCView is the distribution of + // compressed received message bytes per RPC, keyed on method. + ClientReceivedCompressedMessageBytesPerRPCView = &view.View{ + Measure: clientReceivedCompressedBytesPerRPC, + Name: "grpc.io/client/received_compressed_message_bytes_per_rpc", + Description: "Distribution of received compressed message bytes per RPC, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: bytesDistribution, + } + // ClientStartedRPCsView is the count of opened RPCs, keyed on method. + ClientStartedRPCsView = &view.View{ + Measure: clientStartedRPCs, + Name: "grpc.io/client/started_rpcs", + Description: "Number of opened client RPCs, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: view.Count(), + } + // ClientCompletedRPCsView is the count of completed RPCs, keyed on method + // and status. 
+ ClientCompletedRPCsView = &view.View{ + Measure: clientRoundtripLatency, + Name: "grpc.io/client/completed_rpcs", + Description: "Number of completed RPCs by method and status.", + TagKeys: []tag.Key{keyClientMethod, keyClientStatus}, + Aggregation: view.Count(), + } + // ClientRoundtripLatencyView is the distribution of round-trip latency in + // milliseconds per RPC, keyed on method. + ClientRoundtripLatencyView = &view.View{ + Measure: clientRoundtripLatency, + Name: "grpc.io/client/roundtrip_latency", + Description: "Distribution of round-trip latency, by method.", + TagKeys: []tag.Key{keyClientMethod}, + Aggregation: millisecondsDistribution, + } + + // The following metric is per call: + + // ClientAPILatencyView is the distribution of client api latency for the + // full RPC call, keyed on method and status. + ClientAPILatencyView = &view.View{ + Measure: clientAPILatency, + Name: "grpc.io/client/api_latency", + Description: "Distribution of client api latency, by method and status", + TagKeys: []tag.Key{keyClientMethod, keyClientStatus}, + Aggregation: millisecondsDistribution, + } +) + +// DefaultClientViews is the set of client views which are considered the +// minimum required to monitor client side performance. +var DefaultClientViews = []*view.View{ + ClientSentBytesPerRPCView, + ClientReceivedBytesPerRPCView, + ClientRoundtripLatencyView, + ClientCompletedRPCsView, + ClientStartedRPCsView, +} diff --git a/stats/opencensus/e2e_test.go b/stats/opencensus/e2e_test.go new file mode 100644 index 000000000000..3b3c2bbbd908 --- /dev/null +++ b/stats/opencensus/e2e_test.go @@ -0,0 +1,1619 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opencensus + +import ( + "context" + "errors" + "fmt" + "io" + "reflect" + "sort" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + + "google.golang.org/grpc" + "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/leakcheck" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func init() { + // OpenCensus, once included in binary, will spawn a global goroutine + // recorder that is not controllable by application. + // https://github.com/census-instrumentation/opencensus-go/issues/1191 + leakcheck.RegisterIgnoreGoroutine("go.opencensus.io/stats/view.(*worker).start") +} + +var defaultTestTimeout = 5 * time.Second + +type fakeExporter struct { + t *testing.T + + mu sync.RWMutex + seenViews map[string]*viewInformation + seenSpans []spanInformation +} + +// viewInformation is information Exported from the view package through +// ExportView relevant to testing, i.e. a reasonably non flaky expectation of +// desired emissions to Exporter. 
+type viewInformation struct { + aggType view.AggType + aggBuckets []float64 + desc string + tagKeys []tag.Key + rows []*view.Row +} + +func (fe *fakeExporter) ExportView(vd *view.Data) { + fe.mu.Lock() + defer fe.mu.Unlock() + fe.seenViews[vd.View.Name] = &viewInformation{ + aggType: vd.View.Aggregation.Type, + aggBuckets: vd.View.Aggregation.Buckets, + desc: vd.View.Description, + tagKeys: vd.View.TagKeys, + rows: vd.Rows, + } +} + +// compareRows compares rows with respect to the information desired to test. +// Both the tags representing the rows and also the data of the row are tested +// for equality. Rows are in nondeterministic order when ExportView is called, +// but handled inside this function by sorting. +func compareRows(rows []*view.Row, rows2 []*view.Row) bool { + if len(rows) != len(rows2) { + return false + } + // Sort both rows according to the same rule. This is to take away non + // determinism in the row ordering passed to the Exporter, while keeping the + // row data. + sort.Slice(rows, func(i, j int) bool { + return rows[i].String() > rows[j].String() + }) + + sort.Slice(rows2, func(i, j int) bool { + return rows2[i].String() > rows2[j].String() + }) + + for i, row := range rows { + if !cmp.Equal(row.Tags, rows2[i].Tags, cmp.Comparer(func(a tag.Key, b tag.Key) bool { + return a.Name() == b.Name() + })) { + return false + } + if !compareData(row.Data, rows2[i].Data) { + return false + } + } + return true +} + +// compareData returns whether the two aggregation data's are equal to each +// other with respect to parts of the data desired for correct emission. The +// function first makes sure the two types of aggregation data are the same, and +// then checks the equality for the respective aggregation data type. 
+func compareData(ad view.AggregationData, ad2 view.AggregationData) bool { + if ad == nil && ad2 == nil { + return true + } + if ad == nil || ad2 == nil { + return false + } + if reflect.TypeOf(ad) != reflect.TypeOf(ad2) { + return false + } + switch ad1 := ad.(type) { + case *view.DistributionData: + dd2 := ad2.(*view.DistributionData) + // Count and Count Per Buckets are reasonable for correctness, + // especially since we verify equality of bucket endpoints elsewhere. + if ad1.Count != dd2.Count { + return false + } + for i, count := range ad1.CountPerBucket { + if count != dd2.CountPerBucket[i] { + return false + } + } + case *view.CountData: + cd2 := ad2.(*view.CountData) + return ad1.Value == cd2.Value + + // gRPC open census plugin does not have these next two types of aggregation + // data types present, for now just check for type equality between the two + // aggregation data points (done above). + // case *view.SumData + // case *view.LastValueData: + } + return true +} + +func (vi *viewInformation) Equal(vi2 *viewInformation) bool { + if vi == nil && vi2 == nil { + return true + } + if vi == nil || vi2 == nil { + return false + } + if vi.aggType != vi2.aggType { + return false + } + if !cmp.Equal(vi.aggBuckets, vi2.aggBuckets) { + return false + } + if vi.desc != vi2.desc { + return false + } + if !cmp.Equal(vi.tagKeys, vi2.tagKeys, cmp.Comparer(func(a tag.Key, b tag.Key) bool { + return a.Name() == b.Name() + })) { + return false + } + if !compareRows(vi.rows, vi2.rows) { + return false + } + return true +} + +// distributionDataLatencyCount checks if the view information contains the +// desired distrubtion latency total count that falls in buckets of 5 seconds or +// less. This must be called with non nil view information that is aggregated +// with distribution data. Returns a nil error if correct count information +// found, non nil error if correct information not found. 
+func distributionDataLatencyCount(vi *viewInformation, countWant int64, wantTags [][]tag.Tag) error { + var totalCount int64 + var largestIndexWithFive int + for i, bucket := range vi.aggBuckets { + // Distribution for latency is measured in milliseconds, so 5 * 1000 = + // 5000. + if bucket > 5000 { + largestIndexWithFive = i + break + } + } + // Sort rows by string name. This is to take away non determinism in the row + // ordering passed to the Exporter, while keeping the row data. + sort.Slice(vi.rows, func(i, j int) bool { + return vi.rows[i].String() > vi.rows[j].String() + }) + // Iterating through rows sums up data points for all methods. In this case, + // a data point for the unary and for the streaming RPC. + for i, row := range vi.rows { + // The method names corresponding to unary and streaming call should + // have the leading slash removed. + if diff := cmp.Diff(row.Tags, wantTags[i], cmp.Comparer(func(a tag.Key, b tag.Key) bool { + return a.Name() == b.Name() + })); diff != "" { + return fmt.Errorf("wrong tag keys for unary method -got, +want: %v", diff) + } + // This could potentially have an extra measurement in buckets above 5s, + // but that's fine. Count of buckets that could contain up to 5s is a + // good enough assertion. + for i, count := range row.Data.(*view.DistributionData).CountPerBucket { + if i >= largestIndexWithFive { + break + } + totalCount = totalCount + count + } + } + if totalCount != countWant { + return fmt.Errorf("wrong total count for counts under 5: %v, wantCount: %v", totalCount, countWant) + } + return nil +} + +// waitForServerCompletedRPCs waits until both Unary and Streaming metric rows +// appear, in two separate rows, for server completed RPC's view. Returns an +// error if the Unary and Streaming metric are not found within the passed +// context's timeout. 
+func waitForServerCompletedRPCs(ctx context.Context) error { + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + rows, err := view.RetrieveData("grpc.io/server/completed_rpcs") + if err != nil { + continue + } + unaryFound := false + streamingFound := false + for _, row := range rows { + for _, tag := range row.Tags { + if tag.Value == "grpc.testing.TestService/UnaryCall" { + unaryFound = true + break + } else if tag.Value == "grpc.testing.TestService/FullDuplexCall" { + streamingFound = true + break + } + } + if unaryFound && streamingFound { + return nil + } + } + } + return fmt.Errorf("timeout when waiting for Unary and Streaming rows to be present for \"grpc.io/server/completed_rpcs\"") +} + +// TestAllMetricsOneFunction tests emitted metrics from gRPC. It registers all +// the metrics provided by this package. It then configures a system with a gRPC +// Client and gRPC server with the OpenCensus Dial and Server Option configured, +// and makes a Unary RPC and a Streaming RPC. These two RPCs should cause +// certain emissions for each registered metric through the OpenCensus View +// package. +func (s) TestAllMetricsOneFunction(t *testing.T) { + allViews := []*view.View{ + ClientStartedRPCsView, + ServerStartedRPCsView, + ClientCompletedRPCsView, + ServerCompletedRPCsView, + ClientSentBytesPerRPCView, + ClientSentCompressedMessageBytesPerRPCView, + ServerSentBytesPerRPCView, + ServerSentCompressedMessageBytesPerRPCView, + ClientReceivedBytesPerRPCView, + ClientReceivedCompressedMessageBytesPerRPCView, + ServerReceivedBytesPerRPCView, + ServerReceivedCompressedMessageBytesPerRPCView, + ClientSentMessagesPerRPCView, + ServerSentMessagesPerRPCView, + ClientReceivedMessagesPerRPCView, + ServerReceivedMessagesPerRPCView, + ClientRoundtripLatencyView, + ServerLatencyView, + ClientAPILatencyView, + } + view.Register(allViews...) + // Unregister unconditionally in this defer to correctly cleanup globals in + // error conditions. 
+ defer view.Unregister(allViews...) + fe := &fakeExporter{ + t: t, + seenViews: make(map[string]*viewInformation), + } + view.RegisterExporter(fe) + defer view.UnregisterExporter(fe) + + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{Payload: &testpb.Payload{ + Body: make([]byte, 10000), + }}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + for { + _, err := stream.Recv() + if err == io.EOF { + return nil + } + } + }, + } + if err := ss.Start([]grpc.ServerOption{ServerOption(TraceOptions{})}, DialOption(TraceOptions{})); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Make two RPC's, a unary RPC and a streaming RPC. These should cause + // certain metrics to be emitted. + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{ + Body: make([]byte, 10000), + }}, grpc.UseCompressor(gzip.Name)); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + cmtk := tag.MustNewKey("grpc_client_method") + smtk := tag.MustNewKey("grpc_server_method") + cstk := tag.MustNewKey("grpc_client_status") + sstk := tag.MustNewKey("grpc_server_status") + wantMetrics := []struct { + metric *view.View + wantVI *viewInformation + wantTags [][]tag.Tag // for non determinstic (i.e. latency) metrics. First dimension represents rows. 
+ }{ + { + metric: ClientStartedRPCsView, + wantVI: &viewInformation{ + aggType: view.AggTypeCount, + aggBuckets: []float64{}, + desc: "Number of opened client RPCs, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + }, + }, + }, + { + metric: ServerStartedRPCsView, + wantVI: &viewInformation{ + aggType: view.AggTypeCount, + aggBuckets: []float64{}, + desc: "Number of opened server RPCs, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + }, + }, + }, + { + metric: ClientCompletedRPCsView, + wantVI: &viewInformation{ + aggType: view.AggTypeCount, + aggBuckets: []float64{}, + desc: "Number of completed RPCs by method and status.", + tagKeys: []tag.Key{ + cmtk, + cstk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + { + Key: cstk, + Value: "OK", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + { + Key: cstk, + Value: "OK", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + }, + }, + }, + { + metric: ServerCompletedRPCsView, + wantVI: &viewInformation{ + aggType: view.AggTypeCount, + aggBuckets: []float64{}, + desc: "Number of completed RPCs by method and status.", + tagKeys: []tag.Key{ + smtk, + sstk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + 
Value: "grpc.testing.TestService/UnaryCall", + }, + { + Key: sstk, + Value: "OK", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + { + Key: sstk, + Value: "OK", + }, + }, + Data: &view.CountData{ + Value: 1, + }, + }, + }, + }, + }, + { + metric: ClientSentBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of sent bytes per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ClientSentCompressedMessageBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of sent compressed message bytes per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerSentBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of sent bytes per 
RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerSentCompressedMessageBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of sent compressed message bytes per RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ClientReceivedBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of received bytes per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + 
}, + }, + }, + }, + { + metric: ClientReceivedCompressedMessageBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of received compressed message bytes per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerReceivedBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of received bytes per RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerReceivedCompressedMessageBytesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: bytesDistributionBounds, + desc: "Distribution of received compressed message bytes per RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ClientSentMessagesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: countDistributionBounds, + desc: "Distribution of sent messages per RPC, by method.", + tagKeys: []tag.Key{ + cmtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerSentMessagesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: countDistributionBounds, + desc: "Distribution of sent messages per RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ClientReceivedMessagesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: countDistributionBounds, + desc: "Distribution of received messages per RPC, by method.", + tagKeys: 
[]tag.Key{ + cmtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ServerReceivedMessagesPerRPCView, + wantVI: &viewInformation{ + aggType: view.AggTypeDistribution, + aggBuckets: countDistributionBounds, + desc: "Distribution of received messages per RPC, by method.", + tagKeys: []tag.Key{ + smtk, + }, + rows: []*view.Row{ + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + { + Tags: []tag.Tag{ + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + Data: &view.DistributionData{ + Count: 1, + CountPerBucket: []int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + }, + }, + }, + }, + { + metric: ClientRoundtripLatencyView, + wantTags: [][]tag.Tag{ + { + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + { + { + Key: cmtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + }, + }, + { + metric: ServerLatencyView, + wantTags: [][]tag.Tag{ + { + { + Key: smtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + }, + { + { + Key: smtk, + Value: "grpc.testing.TestService/FullDuplexCall", + }, + }, + }, + }, + // Per call metrics: + { + metric: ClientAPILatencyView, + wantTags: [][]tag.Tag{ + { + { + Key: cmtk, + Value: "grpc.testing.TestService/UnaryCall", + }, + { + Key: cstk, + Value: "OK", + }, + }, + { + { + Key: cmtk, + Value: 
"grpc.testing.TestService/FullDuplexCall", + }, + { + Key: cstk, + Value: "OK", + }, + }, + }, + }, + } + // Server Side stats.End call happens asynchronously for both Unary and + // Streaming calls with respect to the RPC returning client side. Thus, add + // a sync point at the global view package level for these two rows to be + // recorded, which will be synchronously uploaded to exporters right after. + if err := waitForServerCompletedRPCs(ctx); err != nil { + t.Fatal(err) + } + view.Unregister(allViews...) + // Assert the expected emissions for each metric match the expected + // emissions. + for _, wantMetric := range wantMetrics { + metricName := wantMetric.metric.Name + var vi *viewInformation + if vi = fe.seenViews[metricName]; vi == nil { + t.Fatalf("couldn't find %v in the views exported, never collected", metricName) + } + + // For latency metrics, there is a lot of non determinism about + // the exact milliseconds of RPCs that finish. Thus, rather than + // declare the exact data you want, make sure the latency + // measurement points for the two RPCs above fall within buckets + // that fall into less than 5 seconds, which is the rpc timeout. + if metricName == "grpc.io/client/roundtrip_latency" || metricName == "grpc.io/server/server_latency" || metricName == "grpc.io/client/api_latency" { + // RPCs have a context timeout of 5s, so all the recorded + // measurements (one per RPC - two total) should fall within 5 + // second buckets. + if err := distributionDataLatencyCount(vi, 2, wantMetric.wantTags); err != nil { + t.Fatalf("Invalid OpenCensus export view data for metric %v: %v", metricName, err) + } + continue + } + if diff := cmp.Diff(vi, wantMetric.wantVI); diff != "" { + t.Fatalf("got unexpected viewInformation for metric %v, diff (-got, +want): %v", metricName, diff) + } + // Note that this test only fatals with one error if a metric fails. 
+ // This is fine, as all are expected to pass so if a single one fails + // you can figure it out and iterate as needed. + } +} + +// TestOpenCensusTags tests this instrumentation code's ability to propagate +// OpenCensus tags across the wire. It also tests the server stats handler's +// functionality of adding the server method tag for the application to see. The +// test makes an Unary RPC without a tag map and with a tag map, and expects to +// see a tag map at the application layer with server method tag in the first +// case, and a tag map at the application layer with the populated tag map plus +// server method tag in second case. +func (s) TestOpenCensusTags(t *testing.T) { + // This stub servers functions represent the application layer server side. + // This is the intended feature being tested: that open census tags + // populated at the client side application layer end up at the server side + // application layer with the server method tag key in addition to the map + // populated at the client side application layer if populated. + tmCh := testutils.NewChannel() + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + // Do the sends of the tag maps for assertions in this main testing + // goroutine. Do the receives and assertions in a forked goroutine. + if tm := tag.FromContext(ctx); tm != nil { + tmCh.Send(tm) + } else { + tmCh.Send(errors.New("no tag map received server side")) + } + return &testpb.SimpleResponse{}, nil + }, + } + if err := ss.Start([]grpc.ServerOption{ServerOption(TraceOptions{})}, DialOption(TraceOptions{})); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + key1 := tag.MustNewKey("key 1") + wg := sync.WaitGroup{} + wg.Add(1) + readerErrCh := testutils.NewChannel() + // Spawn a goroutine to receive and validation two tag maps received by the + // server application code. 
+ go func() { + defer wg.Done() + unaryCallMethodName := "grpc.testing.TestService/UnaryCall" + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Attempt to receive the tag map from the first RPC. + if tm, err := tmCh.Receive(ctx); err == nil { + tagMap, ok := tm.(*tag.Map) + // Shouldn't happen, this test sends only *tag.Map type on channel. + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", tm)) + } + // keyServerMethod should be present in this tag map received server + // side. + val, ok := tagMap.Value(keyServerMethod) + if !ok { + readerErrCh.Send(fmt.Errorf("no key: %v present in OpenCensus tag map", keyServerMethod.Name())) + } + if val != unaryCallMethodName { + readerErrCh.Send(fmt.Errorf("serverMethod receieved: %v, want server method: %v", val, unaryCallMethodName)) + } + } else { + readerErrCh.Send(fmt.Errorf("error while waiting for a tag map: %v", err)) + } + readerErrCh.Send(nil) + + // Attempt to receive the tag map from the second RPC. + if tm, err := tmCh.Receive(ctx); err == nil { + tagMap, ok := tm.(*tag.Map) + // Shouldn't happen, this test sends only *tag.Map type on channel. + if !ok { + readerErrCh.Send(fmt.Errorf("received wrong type from channel: %T", tm)) + } + // key1: "value1" populated in the tag map client side should make + // its way to server. + val, ok := tagMap.Value(key1) + if !ok { + readerErrCh.Send(fmt.Errorf("no key: %v present in OpenCensus tag map", key1.Name())) + } + if val != "value1" { + readerErrCh.Send(fmt.Errorf("key %v received: %v, want server method: %v", key1.Name(), val, unaryCallMethodName)) + } + // keyServerMethod should be appended to tag map as well. 
+ val, ok = tagMap.Value(keyServerMethod) + if !ok { + readerErrCh.Send(fmt.Errorf("no key: %v present in OpenCensus tag map", keyServerMethod.Name())) + } + if val != unaryCallMethodName { + readerErrCh.Send(fmt.Errorf("key: %v received: %v, want server method: %v", keyServerMethod.Name(), val, unaryCallMethodName)) + } + } else { + readerErrCh.Send(fmt.Errorf("error while waiting for second tag map: %v", err)) + } + readerErrCh.Send(nil) + }() + + // Make a unary RPC without populating an OpenCensus tag map. The server + // side should receive an OpenCensus tag map containing only the + // keyServerMethod. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Should receive a nil error from the readerErrCh, meaning the reader + // goroutine successfully received a tag map with the keyServerMethod + // populated. + if chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } + + tm := &tag.Map{} + ctx = tag.NewContext(ctx, tm) + ctx, err := tag.New(ctx, tag.Upsert(key1, "value1")) + // Setup steps like this can fatal, so easier to do the RPC's and subsequent + // sends of the tag maps of the RPC's in main goroutine and have the + // corresponding receives and assertions in a forked goroutine. + if err != nil { + t.Fatalf("Error creating tag map: %v", err) + } + // Make a unary RPC with a populated OpenCensus tag map. 
The server side + // should receive an OpenCensus tag map containing this populated tag map + // with the keyServerMethod tag appended to it. + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + if chErr, err := readerErrCh.Receive(ctx); chErr != nil || err != nil { + if err != nil { + t.Fatalf("Should have received something from error channel: %v", err) + } + if chErr != nil { + t.Fatalf("Should have received a nil error from channel, instead received: %v", chErr) + } + } + wg.Wait() +} + +// compareSpanContext only checks the equality of the trace options, which +// represent whether the span should be sampled. The other fields are checked +// for presence in later assertions. +func compareSpanContext(sc trace.SpanContext, sc2 trace.SpanContext) bool { + return sc.TraceOptions.IsSampled() == sc2.TraceOptions.IsSampled() +} + +func compareMessageEvents(me []trace.MessageEvent, me2 []trace.MessageEvent) bool { + if len(me) != len(me2) { + return false + } + // Order matters here, message events are deterministic so no flakiness to + // test. + for i, e := range me { + e2 := me2[i] + if e.EventType != e2.EventType { + return false + } + if e.MessageID != e2.MessageID { + return false + } + if e.UncompressedByteSize != e2.UncompressedByteSize { + return false + } + if e.CompressedByteSize != e2.CompressedByteSize { + return false + } + } + return true +} + +// compareLinks compares the type of link received compared to the wanted link. +func compareLinks(ls []trace.Link, ls2 []trace.Link) bool { + if len(ls) != len(ls2) { + return false + } + for i, l := range ls { + l2 := ls2[i] + if l.Type != l2.Type { + return false + } + } + return true +} + +// spanInformation is the information received about the span. 
This is a subset +// of information that is important to verify that gRPC has knobs over, which +// goes through a stable OpenCensus API with well defined behavior. This keeps +// the robustness of assertions over time. +type spanInformation struct { + // SpanContext either gets pulled off the wire in certain cases server side + // or created. + sc trace.SpanContext + parentSpanID trace.SpanID + spanKind int + name string + message string + messageEvents []trace.MessageEvent + status trace.Status + links []trace.Link + hasRemoteParent bool + childSpanCount int +} + +// validateTraceAndSpanIDs checks for consistent trace ID across the full trace. +// It also asserts each span has a corresponding generated SpanID, and makes +// sure in the case of a server span and a client span, the server span points +// to the client span as its parent. This is assumed to be called with spans +// from the same RPC (thus the same trace). If called with spanInformation slice +// of length 2, it assumes first span is a server span which points to second +// span as parent and second span is a client span. These assertions are +// orthogonal to pure equality assertions, as this data is generated at runtime, +// so can only test relations between IDs (i.e. this part of the data has the +// same ID as this part of the data). +// +// Returns an error in the case of a failing assertion, non nil error otherwise. +func validateTraceAndSpanIDs(sis []spanInformation) error { + var traceID trace.TraceID + for i, si := range sis { + // Trace IDs should all be consistent across every span, since this + // function assumes called with Span from one RPC, which all fall under + // one trace. 
+ if i == 0 { + traceID = si.sc.TraceID + } else { + if !cmp.Equal(si.sc.TraceID, traceID) { + return fmt.Errorf("TraceIDs should all be consistent: %v, %v", si.sc.TraceID, traceID) + } + } + // Due to the span IDs being 8 bytes, the documentation states that it + // is practically a mathematical uncertainty in practice to create two + // colliding IDs. Thus, for a presence check (the ID was actually + // generated, I will simply compare to the zero value, even though a + // zero value is a theoretical possibility of generation). This is + // because in practice, this zero value defined by this test will never + // collide with the generated ID. + if cmp.Equal(si.sc.SpanID, trace.SpanID{}) { + return errors.New("span IDs should be populated from the creation of the span") + } + } + // If the length of spans of an RPC is 2, it means there is a server span + // which exports first and a client span which exports second. Thus, the + // server span should point to the client span as its parent, represented + // by its ID. + if len(sis) == 2 { + if !cmp.Equal(sis[0].parentSpanID, sis[1].sc.SpanID) { + return fmt.Errorf("server span should point to the client span as its parent. parentSpanID: %v, clientSpanID: %v", sis[0].parentSpanID, sis[1].sc.SpanID) + } + } + return nil +} + +// Equal compares the constant data of the exported span information that is +// important for correctness known before runtime. +func (si spanInformation) Equal(si2 spanInformation) bool { + if !compareSpanContext(si.sc, si2.sc) { + return false + } + + if si.spanKind != si2.spanKind { + return false + } + if si.name != si2.name { + return false + } + if si.message != si2.message { + return false + } + // Ignore attribute comparison because Java doesn't even populate any so not + // important for correctness. 
+ if !compareMessageEvents(si.messageEvents, si2.messageEvents) { + return false + } + if !cmp.Equal(si.status, si2.status) { + return false + } + // compare link type as link type child is important. + if !compareLinks(si.links, si2.links) { + return false + } + if si.hasRemoteParent != si2.hasRemoteParent { + return false + } + return si.childSpanCount == si2.childSpanCount +} + +func (fe *fakeExporter) ExportSpan(sd *trace.SpanData) { + fe.mu.Lock() + defer fe.mu.Unlock() + + // Persist the subset of data received that is important for correctness and + // to make various assertions on later. Keep the ordering as ordering of + // spans is deterministic in the context of one RPC. + gotSI := spanInformation{ + sc: sd.SpanContext, + parentSpanID: sd.ParentSpanID, + spanKind: sd.SpanKind, + name: sd.Name, + message: sd.Message, + // annotations - ignore + // attributes - ignore, I just left them in from previous but no spec + // for correctness so no need to test. Java doesn't even have any + // attributes. + messageEvents: sd.MessageEvents, + status: sd.Status, + links: sd.Links, + hasRemoteParent: sd.HasRemoteParent, + childSpanCount: sd.ChildSpanCount, + } + fe.seenSpans = append(fe.seenSpans, gotSI) +} + +// waitForServerSpan waits until a server span appears somewhere in the span +// list in an exporter. Returns an error if no server span found within the +// passed context's timeout. +func waitForServerSpan(ctx context.Context, fe *fakeExporter) error { + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + fe.mu.Lock() + for _, seenSpan := range fe.seenSpans { + if seenSpan.spanKind == trace.SpanKindServer { + fe.mu.Unlock() + return nil + } + } + fe.mu.Unlock() + } + return fmt.Errorf("timeout when waiting for server span to be present in exporter") +} + +// TestSpan tests emitted spans from gRPC. 
It configures a system with a gRPC +// Client and gRPC server with the OpenCensus Dial and Server Option configured, +// and makes a Unary RPC and a Streaming RPC. This should cause spans with +// certain information to be emitted from client and server side for each RPC. +func (s) TestSpan(t *testing.T) { + fe := &fakeExporter{ + t: t, + } + trace.RegisterExporter(fe) + defer trace.UnregisterExporter(fe) + + so := TraceOptions{ + TS: trace.ProbabilitySampler(1), + DisableTrace: false, + } + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + for { + _, err := stream.Recv() + if err == io.EOF { + return nil + } + } + }, + } + if err := ss.Start([]grpc.ServerOption{ServerOption(so)}, DialOption(so)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Make a Unary RPC. This should cause a span with message events + // corresponding to the request message and response message to be emitted + // both from the client and the server. 
+ if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: &testpb.Payload{}}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + wantSI := []spanInformation{ + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + name: "Attempt.grpc.testing.TestService.UnaryCall", + messageEvents: []trace.MessageEvent{ + { + EventType: trace.MessageEventTypeSent, + MessageID: 1, // First msg send so 1 (see comment above) + UncompressedByteSize: 2, + CompressedByteSize: 2, + }, + { + EventType: trace.MessageEventTypeRecv, + MessageID: 1, // First msg recv so 1 (see comment above) + }, + }, + hasRemoteParent: false, + }, + { + // Sampling rate of 100 percent, so this should populate every span + // with the information that this span is being sampled. Here and + // every other span emitted in this test. + sc: trace.SpanContext{ + TraceOptions: 1, + }, + spanKind: trace.SpanKindServer, + name: "grpc.testing.TestService.UnaryCall", + // message id - "must be calculated as two different counters + // starting from 1 one for sent messages and one for received + // message. This way we guarantee that the values will be consistent + // between different implementations. In case of unary calls only + // one sent and one received message will be recorded for both + // client and server spans." + messageEvents: []trace.MessageEvent{ + { + EventType: trace.MessageEventTypeRecv, + MessageID: 1, // First msg recv so 1 (see comment above) + UncompressedByteSize: 2, + CompressedByteSize: 2, + }, + { + EventType: trace.MessageEventTypeSent, + MessageID: 1, // First msg send so 1 (see comment above) + }, + }, + links: []trace.Link{ + { + Type: trace.LinkTypeChild, + }, + }, + // For some reason, status isn't populated in the data sent to the + // exporter. This seems wrong, but it didn't send status in old + // instrumentation code, so I'm iffy on it but fine. 
+ hasRemoteParent: true, + }, + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + spanKind: trace.SpanKindClient, + name: "grpc.testing.TestService.UnaryCall", + hasRemoteParent: false, + childSpanCount: 1, + }, + } + if err := waitForServerSpan(ctx, fe); err != nil { + t.Fatal(err) + } + var spanInfoSort = func(i, j int) bool { + // This will order into attempt span (which has an unset span kind to + // not prepend Sent. to span names in backends), then call span, then + // server span. + return fe.seenSpans[i].spanKind < fe.seenSpans[j].spanKind + } + fe.mu.Lock() + // Sort the underlying seen Spans for cmp.Diff assertions and ID + // relationship assertions. + sort.Slice(fe.seenSpans, spanInfoSort) + if diff := cmp.Diff(fe.seenSpans, wantSI); diff != "" { + fe.mu.Unlock() + t.Fatalf("got unexpected spans, diff (-got, +want): %v", diff) + } + if err := validateTraceAndSpanIDs(fe.seenSpans); err != nil { + fe.mu.Unlock() + t.Fatalf("Error in runtime data assertions: %v", err) + } + if !cmp.Equal(fe.seenSpans[1].parentSpanID, fe.seenSpans[0].sc.SpanID) { + t.Fatalf("server span should point to the client attempt span as its parent. parentSpanID: %v, clientAttemptSpanID: %v", fe.seenSpans[1].parentSpanID, fe.seenSpans[0].sc.SpanID) + } + if !cmp.Equal(fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) { + t.Fatalf("client attempt span should point to the client call span as its parent. parentSpanID: %v, clientCallSpanID: %v", fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) + } + + fe.seenSpans = nil + fe.mu.Unlock() + + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %v", err) + } + // Send two messages. This should be recorded in the emitted spans message + // events, with message IDs which increase for each message. 
+ if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("stream.Send failed: %v", err) + } + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("stream.Send failed: %v", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + wantSI = []spanInformation{ + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + name: "Attempt.grpc.testing.TestService.FullDuplexCall", + messageEvents: []trace.MessageEvent{ + { + EventType: trace.MessageEventTypeSent, + MessageID: 1, // First msg send so 1 + }, + { + EventType: trace.MessageEventTypeSent, + MessageID: 2, // Second msg send so 2 + }, + }, + hasRemoteParent: false, + }, + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + spanKind: trace.SpanKindServer, + name: "grpc.testing.TestService.FullDuplexCall", + links: []trace.Link{ + { + Type: trace.LinkTypeChild, + }, + }, + messageEvents: []trace.MessageEvent{ + { + EventType: trace.MessageEventTypeRecv, + MessageID: 1, // First msg recv so 1 + }, + { + EventType: trace.MessageEventTypeRecv, + MessageID: 2, // Second msg recv so 2 + }, + }, + hasRemoteParent: true, + }, + { + sc: trace.SpanContext{ + TraceOptions: 1, + }, + spanKind: trace.SpanKindClient, + name: "grpc.testing.TestService.FullDuplexCall", + hasRemoteParent: false, + childSpanCount: 1, + }, + } + if err := waitForServerSpan(ctx, fe); err != nil { + t.Fatal(err) + } + fe.mu.Lock() + defer fe.mu.Unlock() + // Sort the underlying seen Spans for cmp.Diff assertions and ID + // relationship assertions. 
+ sort.Slice(fe.seenSpans, spanInfoSort) + if diff := cmp.Diff(fe.seenSpans, wantSI); diff != "" { + t.Fatalf("got unexpected spans, diff (-got, +want): %v", diff) + } + if err := validateTraceAndSpanIDs(fe.seenSpans); err != nil { + t.Fatalf("Error in runtime data assertions: %v", err) + } + if !cmp.Equal(fe.seenSpans[1].parentSpanID, fe.seenSpans[0].sc.SpanID) { + t.Fatalf("server span should point to the client attempt span as its parent. parentSpanID: %v, clientAttemptSpanID: %v", fe.seenSpans[1].parentSpanID, fe.seenSpans[0].sc.SpanID) + } + if !cmp.Equal(fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) { + t.Fatalf("client attempt span should point to the client call span as its parent. parentSpanID: %v, clientCallSpanID: %v", fe.seenSpans[0].parentSpanID, fe.seenSpans[2].sc.SpanID) + } +} diff --git a/stats/opencensus/go.mod b/stats/opencensus/go.mod new file mode 100644 index 000000000000..45e95ac7d1c7 --- /dev/null +++ b/stats/opencensus/go.mod @@ -0,0 +1,21 @@ +module google.golang.org/grpc/stats/opencensus + +go 1.17 + +require ( + github.com/google/go-cmp v0.5.9 + go.opencensus.io v0.24.0 + google.golang.org/grpc v1.54.0 +) + +require ( + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/protobuf v1.30.0 // indirect +) + +replace google.golang.org/grpc => ../.. 
diff --git a/stats/opencensus/go.sum b/stats/opencensus/go.sum new file mode 100644 index 000000000000..f3aff2587e11 --- /dev/null +++ b/stats/opencensus/go.sum @@ -0,0 +1,1455 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod 
h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod 
h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.4.0/go.mod 
h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod 
h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod 
h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= 
+cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod 
h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= 
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog 
v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod 
h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= 
+cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod 
h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod 
h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= 
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod 
h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities 
v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod 
h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod 
h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod 
h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= 
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= 
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler 
v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= 
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell 
v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= 
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod 
h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod 
h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod 
h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= 
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= 
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= 
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod 
h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= 
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod 
h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal 
v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net 
v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod 
h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod 
h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod 
h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto 
v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= 
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod 
h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto 
v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= 
+google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod 
h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod 
h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/stats/opencensus/opencensus.go b/stats/opencensus/opencensus.go new file mode 100644 index 000000000000..8bebb21575e1 --- /dev/null +++ b/stats/opencensus/opencensus.go @@ -0,0 +1,251 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package opencensus implements opencensus instrumentation code for gRPC-Go +// clients and servers. +package opencensus + +import ( + "context" + "strings" + "time" + + ocstats "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +var ( + joinDialOptions = internal.JoinDialOptions.(func(...grpc.DialOption) grpc.DialOption) +) + +// TraceOptions are the tracing options for opencensus instrumentation. +type TraceOptions struct { + // TS is the Sampler used for tracing. 
+ TS trace.Sampler + // DisableTrace determines whether traces are disabled for an OpenCensus + // Dial or Server option. will overwrite any global option setting. + DisableTrace bool +} + +// DialOption returns a dial option which enables OpenCensus instrumentation +// code for a grpc.ClientConn. +// +// Client applications interested in instrumenting their grpc.ClientConn should +// pass the dial option returned from this function as the first dial option to +// grpc.Dial(). +// +// Using this option will always lead to instrumentation, however in order to +// use the data an exporter must be registered with the OpenCensus trace package +// for traces and the OpenCensus view package for metrics. Client side has +// retries, so a Unary and Streaming Interceptor are registered to handle per +// RPC traces/metrics, and a Stats Handler is registered to handle per RPC +// attempt trace/metrics. These three components registered work together in +// conjunction, and do not work standalone. It is not supported to use this +// alongside another stats handler dial option. +func DialOption(to TraceOptions) grpc.DialOption { + csh := &clientStatsHandler{to: to} + return joinDialOptions(grpc.WithChainUnaryInterceptor(csh.unaryInterceptor), grpc.WithChainStreamInterceptor(csh.streamInterceptor), grpc.WithStatsHandler(csh)) +} + +// ServerOption returns a server option which enables OpenCensus instrumentation +// code for a grpc.Server. +// +// Server applications interested in instrumenting their grpc.Server should +// pass the server option returned from this function as the first argument to +// grpc.NewServer(). +// +// Using this option will always lead to instrumentation, however in order to +// use the data an exporter must be registered with the OpenCensus trace package +// for traces and the OpenCensus view package for metrics. Server side does not +// have retries, so a registered Stats Handler is the only option that is +// returned. 
It is not supported to use this alongside another stats handler +// server option. +func ServerOption(to TraceOptions) grpc.ServerOption { + return grpc.StatsHandler(&serverStatsHandler{to: to}) +} + +// createCallSpan creates a call span if tracing is enabled, which will be put +// in the context provided if created. +func (csh *clientStatsHandler) createCallSpan(ctx context.Context, method string) (context.Context, *trace.Span) { + var span *trace.Span + if !csh.to.DisableTrace { + mn := strings.Replace(removeLeadingSlash(method), "/", ".", -1) + ctx, span = trace.StartSpan(ctx, mn, trace.WithSampler(csh.to.TS), trace.WithSpanKind(trace.SpanKindClient)) + } + return ctx, span +} + +// perCallTracesAndMetrics records per call spans and metrics. +func perCallTracesAndMetrics(err error, span *trace.Span, startTime time.Time, method string) { + s := status.Convert(err) + if span != nil { + span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) + span.End() + } + callLatency := float64(time.Since(startTime)) / float64(time.Millisecond) + ocstats.RecordWithOptions(context.Background(), + ocstats.WithTags( + tag.Upsert(keyClientMethod, removeLeadingSlash(method)), + tag.Upsert(keyClientStatus, canonicalString(s.Code())), + ), + ocstats.WithMeasurements( + clientAPILatency.M(callLatency), + ), + ) +} + +// unaryInterceptor handles per RPC context management. It also handles per RPC +// tracing and stats by creating a top level call span and recording the latency +// for the full RPC call. +func (csh *clientStatsHandler) unaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + startTime := time.Now() + ctx, span := csh.createCallSpan(ctx, method) + err := invoker(ctx, method, req, reply, cc, opts...) + perCallTracesAndMetrics(err, span, startTime, method) + return err +} + +// streamInterceptor handles per RPC context management. 
It also handles per RPC +// tracing and stats by creating a top level call span and recording the latency +// for the full RPC call. +func (csh *clientStatsHandler) streamInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + startTime := time.Now() + ctx, span := csh.createCallSpan(ctx, method) + callback := func(err error) { + perCallTracesAndMetrics(err, span, startTime, method) + } + opts = append([]grpc.CallOption{grpc.OnFinish(callback)}, opts...) + s, err := streamer(ctx, desc, cc, method, opts...) + if err != nil { + return nil, err + } + return s, nil +} + +type rpcInfo struct { + mi *metricsInfo + ti *traceInfo +} + +type rpcInfoKey struct{} + +func setRPCInfo(ctx context.Context, ri *rpcInfo) context.Context { + return context.WithValue(ctx, rpcInfoKey{}, ri) +} + +// getRPCInfo returns the rpcInfo stored in the context, or nil +// if there isn't one. +func getRPCInfo(ctx context.Context) *rpcInfo { + ri, _ := ctx.Value(rpcInfoKey{}).(*rpcInfo) + return ri +} + +// SpanContextFromContext returns the Span Context about the Span in the +// context. Returns false if no Span in the context. +func SpanContextFromContext(ctx context.Context) (trace.SpanContext, bool) { + ri, ok := ctx.Value(rpcInfoKey{}).(*rpcInfo) + if !ok { + return trace.SpanContext{}, false + } + if ri.ti == nil || ri.ti.span == nil { + return trace.SpanContext{}, false + } + sc := ri.ti.span.SpanContext() + return sc, true +} + +type clientStatsHandler struct { + to TraceOptions +} + +// TagConn exists to satisfy stats.Handler. +func (csh *clientStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} + +// HandleConn exists to satisfy stats.Handler. +func (csh *clientStatsHandler) HandleConn(context.Context, stats.ConnStats) {} + +// TagRPC implements per RPC attempt context management. 
+func (csh *clientStatsHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx, mi := csh.statsTagRPC(ctx, rti) + var ti *traceInfo + if !csh.to.DisableTrace { + ctx, ti = csh.traceTagRPC(ctx, rti) + } + ri := &rpcInfo{ + mi: mi, + ti: ti, + } + return setRPCInfo(ctx, ri) +} + +func (csh *clientStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + ri := getRPCInfo(ctx) + if ri == nil { + // Shouldn't happen because TagRPC populates this information. + return + } + recordRPCData(ctx, rs, ri.mi) + if !csh.to.DisableTrace { + populateSpan(ctx, rs, ri.ti) + } +} + +type serverStatsHandler struct { + to TraceOptions +} + +// TagConn exists to satisfy stats.Handler. +func (ssh *serverStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} + +// HandleConn exists to satisfy stats.Handler. +func (ssh *serverStatsHandler) HandleConn(context.Context, stats.ConnStats) {} + +// TagRPC implements per RPC context management. +func (ssh *serverStatsHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { + ctx, mi := ssh.statsTagRPC(ctx, rti) + var ti *traceInfo + if !ssh.to.DisableTrace { + ctx, ti = ssh.traceTagRPC(ctx, rti) + } + ri := &rpcInfo{ + mi: mi, + ti: ti, + } + return setRPCInfo(ctx, ri) +} + +// HandleRPC implements per RPC tracing and stats implementation. +func (ssh *serverStatsHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { + ri := getRPCInfo(ctx) + if ri == nil { + // Shouldn't happen because TagRPC populates this information. + return + } + recordRPCData(ctx, rs, ri.mi) + if !ssh.to.DisableTrace { + populateSpan(ctx, rs, ri.ti) + } +} diff --git a/stats/opencensus/server_metrics.go b/stats/opencensus/server_metrics.go new file mode 100644 index 000000000000..4f087c2209a7 --- /dev/null +++ b/stats/opencensus/server_metrics.go @@ -0,0 +1,135 @@ +/* + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opencensus + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +var ( + keyServerMethod = tag.MustNewKey("grpc_server_method") + keyServerStatus = tag.MustNewKey("grpc_server_status") +) + +// Measures, which are recorded by server stats handler: Note that on gRPC's +// server side, the per rpc unit is truly per rpc, as there is no concept of a +// rpc attempt server side. +var ( + serverReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) // the collection/measurement point of this measure handles the /rpc aspect of it + serverReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) + serverReceivedCompressedBytesPerRPC = stats.Int64("grpc.io/server/received_compressed_bytes_per_rpc", "Total compressed bytes received across all messages per RPC.", stats.UnitBytes) + serverSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) + serverSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + serverSentCompressedBytesPerRPC = stats.Int64("grpc.io/server/sent_compressed_bytes_per_rpc", "Total compressed bytes sent in across all response messages per RPC.", stats.UnitBytes) + serverStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "The total number of server RPCs ever opened, including those that have not completed.", stats.UnitDimensionless) + serverLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) +) + +var ( + // ServerSentMessagesPerRPCView is the distribution of sent messages per + // RPC, keyed on method. + ServerSentMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_messages_per_rpc", + Description: "Distribution of sent messages per RPC, by method.", + TagKeys: []tag.Key{keyServerMethod}, + Measure: serverSentMessagesPerRPC, + Aggregation: countDistribution, + } + // ServerReceivedMessagesPerRPCView is the distribution of received messages + // per RPC, keyed on method. + ServerReceivedMessagesPerRPCView = &view.View{ + Name: "grpc.io/server/received_messages_per_rpc", + Description: "Distribution of received messages per RPC, by method.", + TagKeys: []tag.Key{keyServerMethod}, + Measure: serverReceivedMessagesPerRPC, + Aggregation: countDistribution, + } + // ServerSentBytesPerRPCView is the distribution of received bytes per RPC, + // keyed on method. 
+ ServerSentBytesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_bytes_per_rpc", + Description: "Distribution of sent bytes per RPC, by method.", + Measure: serverSentBytesPerRPC, + TagKeys: []tag.Key{keyServerMethod}, + Aggregation: bytesDistribution, + } + // ServerSentCompressedMessageBytesPerRPCView is the distribution of + // received compressed message bytes per RPC, keyed on method. + ServerSentCompressedMessageBytesPerRPCView = &view.View{ + Name: "grpc.io/server/sent_compressed_message_bytes_per_rpc", + Description: "Distribution of sent compressed message bytes per RPC, by method.", + Measure: serverSentCompressedBytesPerRPC, + TagKeys: []tag.Key{keyServerMethod}, + Aggregation: bytesDistribution, + } + // ServerReceivedBytesPerRPCView is the distribution of sent bytes per RPC, + // keyed on method. + ServerReceivedBytesPerRPCView = &view.View{ + Name: "grpc.io/server/received_bytes_per_rpc", + Description: "Distribution of received bytes per RPC, by method.", + Measure: serverReceivedBytesPerRPC, + TagKeys: []tag.Key{keyServerMethod}, + Aggregation: bytesDistribution, + } + // ServerReceivedCompressedMessageBytesPerRPCView is the distribution of + // sent compressed message bytes per RPC, keyed on method. + ServerReceivedCompressedMessageBytesPerRPCView = &view.View{ + Name: "grpc.io/server/received_compressed_message_bytes_per_rpc", + Description: "Distribution of received compressed message bytes per RPC, by method.", + Measure: serverReceivedCompressedBytesPerRPC, + TagKeys: []tag.Key{keyServerMethod}, + Aggregation: bytesDistribution, + } + // ServerStartedRPCsView is the count of opened RPCs, keyed on method. + ServerStartedRPCsView = &view.View{ + Measure: serverStartedRPCs, + Name: "grpc.io/server/started_rpcs", + Description: "Number of opened server RPCs, by method.", + TagKeys: []tag.Key{keyServerMethod}, + Aggregation: view.Count(), + } + // ServerCompletedRPCsView is the count of completed RPCs, keyed on + // method and status. 
+ ServerCompletedRPCsView = &view.View{ + Name: "grpc.io/server/completed_rpcs", + Description: "Number of completed RPCs by method and status.", + TagKeys: []tag.Key{keyServerMethod, keyServerStatus}, + Measure: serverLatency, + Aggregation: view.Count(), + } + // ServerLatencyView is the distribution of server latency in milliseconds + // per RPC, keyed on method. + ServerLatencyView = &view.View{ + Name: "grpc.io/server/server_latency", + Description: "Distribution of server latency in milliseconds, by method.", + TagKeys: []tag.Key{keyServerMethod}, + Measure: serverLatency, + Aggregation: millisecondsDistribution, + } +) + +// DefaultServerViews is the set of server views which are considered the +// minimum required to monitor server side performance. +var DefaultServerViews = []*view.View{ + ServerReceivedBytesPerRPCView, + ServerSentBytesPerRPCView, + ServerLatencyView, + ServerCompletedRPCsView, + ServerStartedRPCsView, +} diff --git a/stats/opencensus/stats.go b/stats/opencensus/stats.go new file mode 100644 index 000000000000..01cd0b9b157d --- /dev/null +++ b/stats/opencensus/stats.go @@ -0,0 +1,222 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package opencensus + +import ( + "context" + "strings" + "sync/atomic" + "time" + + ocstats "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +var logger = grpclog.Component("opencensus-instrumentation") + +var canonicalString = internal.CanonicalString.(func(codes.Code) string) + +var ( + // bounds separate variable for testing purposes. + bytesDistributionBounds = []float64{1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296} + bytesDistribution = view.Distribution(bytesDistributionBounds...) + millisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + countDistributionBounds = []float64{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536} + countDistribution = view.Distribution(countDistributionBounds...) +) + +func removeLeadingSlash(mn string) string { + return strings.TrimLeft(mn, "/") +} + +// metricsInfo is data used for recording metrics about the rpc attempt client +// side, and the overall rpc server side. 
+type metricsInfo struct { + // access these counts atomically for hedging in the future + // number of messages sent from side (client || server) + sentMsgs int64 + // number of bytes sent (within each message) from side (client || server) + sentBytes int64 + // number of bytes after compression (within each message) from side (client || server) + sentCompressedBytes int64 + // number of messages received on side (client || server) + recvMsgs int64 + // number of bytes received (within each message) received on side (client + // || server) + recvBytes int64 + // number of compressed bytes received (within each message) received on + // side (client || server) + recvCompressedBytes int64 + + startTime time.Time + method string +} + +// statsTagRPC creates a recording object to derive measurements from in the +// context, scoping the recordings to per RPC Attempt client side (scope of the +// context). It also populates the gRPC Metadata within the context with any +// opencensus specific tags set by the application in the context, binary +// encoded to send across the wire. +func (csh *clientStatsHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) (context.Context, *metricsInfo) { + mi := &metricsInfo{ + startTime: time.Now(), + method: info.FullMethodName, + } + + // Populate gRPC Metadata with OpenCensus tag map if set by application. + if tm := tag.FromContext(ctx); tm != nil { + ctx = stats.SetTags(ctx, tag.Encode(tm)) + } + return ctx, mi +} + +// statsTagRPC creates a recording object to derive measurements from in the +// context, scoping the recordings to per RPC server side (scope of the +// context). It also deserializes the opencensus tags set in the context's gRPC +// Metadata, and adds a server method tag to the opencensus tags. 
+func (ssh *serverStatsHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) (context.Context, *metricsInfo) { + mi := &metricsInfo{ + startTime: time.Now(), + method: info.FullMethodName, + } + + if tagsBin := stats.Tags(ctx); tagsBin != nil { + if tags, err := tag.Decode(tagsBin); err == nil { + ctx = tag.NewContext(ctx, tags) + } + } + // We can ignore the error here because in the error case, the context + // passed in is returned. If the call errors, the server side application + // layer won't get this key server method information in the tag map, but + // this instrumentation code will function as normal. + ctx, _ = tag.New(ctx, tag.Upsert(keyServerMethod, removeLeadingSlash(info.FullMethodName))) + return ctx, mi +} + +func recordRPCData(ctx context.Context, s stats.RPCStats, mi *metricsInfo) { + if mi == nil { + // Shouldn't happen, as gRPC calls TagRPC which populates the metricsInfo in + // context. + logger.Error("ctx passed into stats handler metrics event handling has no metrics data present") + return + } + switch st := s.(type) { + case *stats.InHeader, *stats.OutHeader, *stats.InTrailer, *stats.OutTrailer: + // Headers and Trailers are not relevant to the measures, as the + // measures concern number of messages and bytes for messages. This + // aligns with flow control. + case *stats.Begin: + recordDataBegin(ctx, mi, st) + case *stats.OutPayload: + recordDataOutPayload(mi, st) + case *stats.InPayload: + recordDataInPayload(mi, st) + case *stats.End: + recordDataEnd(ctx, mi, st) + default: + // Shouldn't happen. gRPC calls into stats handler, and will never not + // be one of the types above. + logger.Errorf("Received unexpected stats type (%T) with data: %v", s, s) + } +} + +// recordDataBegin takes a measurement related to the RPC beginning, +// client/server started RPCs dependent on the caller. 
+func recordDataBegin(ctx context.Context, mi *metricsInfo, b *stats.Begin) { + if b.Client { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(keyClientMethod, removeLeadingSlash(mi.method))), + ocstats.WithMeasurements(clientStartedRPCs.M(1))) + return + } + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(keyServerMethod, removeLeadingSlash(mi.method))), + ocstats.WithMeasurements(serverStartedRPCs.M(1))) +} + +// recordDataOutPayload records the length in bytes of outgoing messages and +// increases total count of sent messages both stored in the RPCs (attempt on +// client side) context for use in taking measurements at RPC end. +func recordDataOutPayload(mi *metricsInfo, op *stats.OutPayload) { + atomic.AddInt64(&mi.sentMsgs, 1) + atomic.AddInt64(&mi.sentBytes, int64(op.Length)) + atomic.AddInt64(&mi.sentCompressedBytes, int64(op.CompressedLength)) +} + +// recordDataInPayload records the length in bytes of incoming messages and +// increases total count of sent messages both stored in the RPCs (attempt on +// client side) context for use in taking measurements at RPC end. +func recordDataInPayload(mi *metricsInfo, ip *stats.InPayload) { + atomic.AddInt64(&mi.recvMsgs, 1) + atomic.AddInt64(&mi.recvBytes, int64(ip.Length)) + atomic.AddInt64(&mi.recvCompressedBytes, int64(ip.CompressedLength)) +} + +// recordDataEnd takes per RPC measurements derived from information derived +// from the lifetime of the RPC (RPC attempt client side). +func recordDataEnd(ctx context.Context, mi *metricsInfo, e *stats.End) { + // latency bounds for distribution data (speced millisecond bounds) have + // fractions, thus need a float. + latency := float64(time.Since(mi.startTime)) / float64(time.Millisecond) + var st string + if e.Error != nil { + s, _ := status.FromError(e.Error) + st = canonicalString(s.Code()) + } else { + st = "OK" + } + + // TODO: Attach trace data through attachments?!?! 
+ + if e.Client { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(keyClientMethod, removeLeadingSlash(mi.method)), + tag.Upsert(keyClientStatus, st)), + ocstats.WithMeasurements( + clientSentBytesPerRPC.M(atomic.LoadInt64(&mi.sentBytes)), + clientSentCompressedBytesPerRPC.M(atomic.LoadInt64(&mi.sentCompressedBytes)), + clientSentMessagesPerRPC.M(atomic.LoadInt64(&mi.sentMsgs)), + clientReceivedMessagesPerRPC.M(atomic.LoadInt64(&mi.recvMsgs)), + clientReceivedBytesPerRPC.M(atomic.LoadInt64(&mi.recvBytes)), + clientReceivedCompressedBytesPerRPC.M(atomic.LoadInt64(&mi.recvCompressedBytes)), + clientRoundtripLatency.M(latency), + clientServerLatency.M(latency), + )) + return + } + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( + tag.Upsert(keyServerMethod, removeLeadingSlash(mi.method)), + tag.Upsert(keyServerStatus, st), + ), + ocstats.WithMeasurements( + serverSentBytesPerRPC.M(atomic.LoadInt64(&mi.sentBytes)), + serverSentCompressedBytesPerRPC.M(atomic.LoadInt64(&mi.sentCompressedBytes)), + serverSentMessagesPerRPC.M(atomic.LoadInt64(&mi.sentMsgs)), + serverReceivedMessagesPerRPC.M(atomic.LoadInt64(&mi.recvMsgs)), + serverReceivedBytesPerRPC.M(atomic.LoadInt64(&mi.recvBytes)), + serverReceivedCompressedBytesPerRPC.M(atomic.LoadInt64(&mi.recvCompressedBytes)), + serverLatency.M(latency))) +} diff --git a/stats/opencensus/trace.go b/stats/opencensus/trace.go new file mode 100644 index 000000000000..f41cb838adc9 --- /dev/null +++ b/stats/opencensus/trace.go @@ -0,0 +1,122 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package opencensus + +import ( + "context" + "strings" + "sync/atomic" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" + + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// traceInfo is data used for recording traces. +type traceInfo struct { + span *trace.Span + countSentMsg uint32 + countRecvMsg uint32 +} + +// traceTagRPC populates context with a new span, and serializes information +// about this span into gRPC Metadata. +func (csh *clientStatsHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) (context.Context, *traceInfo) { + // TODO: get consensus on whether this method name of "s.m" is correct. + mn := "Attempt." + strings.Replace(removeLeadingSlash(rti.FullMethodName), "/", ".", -1) + // Returned context is ignored because will populate context with data that + // wraps the span instead. Don't set span kind client on this attempt span + // to prevent backend from prepending span name with "Sent.". + _, span := trace.StartSpan(ctx, mn, trace.WithSampler(csh.to.TS)) + + tcBin := propagation.Binary(span.SpanContext()) + return stats.SetTrace(ctx, tcBin), &traceInfo{ + span: span, + countSentMsg: 0, // msg events scoped to scope of context, per attempt client side + countRecvMsg: 0, + } +} + +// traceTagRPC populates context with new span data, with a parent based on the +// spanContext deserialized from context passed in (wire data in gRPC metadata) +// if present. 
+func (ssh *serverStatsHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) (context.Context, *traceInfo) { + mn := strings.Replace(removeLeadingSlash(rti.FullMethodName), "/", ".", -1) + + var span *trace.Span + if sc, ok := propagation.FromBinary(stats.Trace(ctx)); ok { + // Returned context is ignored because will populate context with data + // that wraps the span instead. + _, span = trace.StartSpanWithRemoteParent(ctx, mn, sc, trace.WithSpanKind(trace.SpanKindServer), trace.WithSampler(ssh.to.TS)) + span.AddLink(trace.Link{TraceID: sc.TraceID, SpanID: sc.SpanID, Type: trace.LinkTypeChild}) + } else { + // Returned context is ignored because will populate context with data + // that wraps the span instead. + _, span = trace.StartSpan(ctx, mn, trace.WithSpanKind(trace.SpanKindServer), trace.WithSampler(ssh.to.TS)) + } + + return ctx, &traceInfo{ + span: span, + countSentMsg: 0, + countRecvMsg: 0, + } +} + +// populateSpan populates span information based on stats passed in (invariants +// of the RPC lifecycle), and also ends span which triggers the span to be +// exported. +func populateSpan(ctx context.Context, rs stats.RPCStats, ti *traceInfo) { + if ti == nil || ti.span == nil { + // Shouldn't happen, tagRPC call comes before this function gets called + // which populates this information. + logger.Error("ctx passed into stats handler tracing event handling has no span present") + return + } + span := ti.span + + switch rs := rs.(type) { + case *stats.Begin: + // Note: Go always added these attributes even though they are not + // defined by the OpenCensus gRPC spec. Thus, they are unimportant for + // correctness. + span.AddAttributes( + trace.BoolAttribute("Client", rs.Client), + trace.BoolAttribute("FailFast", rs.FailFast), + ) + case *stats.InPayload: + // message id - "must be calculated as two different counters starting + // from 1 one for sent messages and one for received messages." 
+ mi := atomic.AddUint32(&ti.countRecvMsg, 1) + span.AddMessageReceiveEvent(int64(mi), int64(rs.Length), int64(rs.CompressedLength)) + case *stats.OutPayload: + mi := atomic.AddUint32(&ti.countSentMsg, 1) + span.AddMessageSendEvent(int64(mi), int64(rs.Length), int64(rs.CompressedLength)) + case *stats.End: + if rs.Error != nil { + // "The mapping between gRPC canonical codes and OpenCensus codes + // can be found here", which implies 1:1 mapping to gRPC statuses + // (OpenCensus statuses are based off gRPC statuses and a subset). + s := status.Convert(rs.Error) + span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) + } else { + span.SetStatus(trace.Status{Code: trace.StatusCodeOK}) // could get rid of this else conditional and just leave as 0 value, but this makes it explicit + } + span.End() + } +} diff --git a/stats/stats.go b/stats/stats.go index 63e476ee7ff8..7a552a9b7871 100644 --- a/stats/stats.go +++ b/stats/stats.go @@ -36,15 +36,22 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC begins. +// Begin contains stats when an RPC attempt begins. // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. Client bool - // BeginTime is the time when the RPC begins. + // BeginTime is the time when the RPC attempt begins. BeginTime time.Time // FailFast indicates if this RPC is failfast. FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool } // IsClient indicates if the stats information is from client side. @@ -60,10 +67,18 @@ type InPayload struct { Payload interface{} // Data is the serialized message payload. 
Data []byte - // Length is the length of uncompressed data. + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int + // RecvTime is the time when the payload is received. RecvTime time.Time } @@ -122,9 +137,15 @@ type OutPayload struct { Payload interface{} // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int // SentTime is the time when the payload is sent. SentTime time.Time diff --git a/stats/stats_test.go b/stats/stats_test.go index 306f2f6b8e90..b0b3df70c9d3 100644 --- a/stats/stats_test.go +++ b/stats/stats_test.go @@ -30,6 +30,7 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" @@ -175,10 +176,10 @@ func (s *testServer) StreamingOutputCall(in *testpb.StreamingOutputCallRequest, // func, modified as needed, and then started with its startServer method. 
// It should be cleaned up with the tearDown method. type test struct { - t *testing.T - compress string - clientStatsHandler stats.Handler - serverStatsHandler stats.Handler + t *testing.T + compress string + clientStatsHandlers []stats.Handler + serverStatsHandlers []stats.Handler testServer testgrpc.TestServiceServer // nil means none // srv and srvAddr are set once startServer is called. @@ -203,12 +204,12 @@ type testConfig struct { // newTest returns a new test using the provided testing.T and // environment. It is returned with default values. Tests should // modify it before calling its startServer and clientConn methods. -func newTest(t *testing.T, tc *testConfig, ch stats.Handler, sh stats.Handler) *test { +func newTest(t *testing.T, tc *testConfig, chs []stats.Handler, shs []stats.Handler) *test { te := &test{ - t: t, - compress: tc.compress, - clientStatsHandler: ch, - serverStatsHandler: sh, + t: t, + compress: tc.compress, + clientStatsHandlers: chs, + serverStatsHandlers: shs, } return te } @@ -228,8 +229,8 @@ func (te *test) startServer(ts testgrpc.TestServiceServer) { grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), ) } - if te.serverStatsHandler != nil { - opts = append(opts, grpc.StatsHandler(te.serverStatsHandler)) + for _, sh := range te.serverStatsHandlers { + opts = append(opts, grpc.StatsHandler(sh)) } s := grpc.NewServer(opts...) 
te.srv = s @@ -246,7 +247,7 @@ func (te *test) clientConn() *grpc.ClientConn { return te.cc } opts := []grpc.DialOption{ - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), grpc.WithUserAgent("test/0.0.1"), } @@ -256,8 +257,8 @@ func (te *test) clientConn() *grpc.ClientConn { grpc.WithDecompressor(grpc.NewGZIPDecompressor()), ) } - if te.clientStatsHandler != nil { - opts = append(opts, grpc.WithStatsHandler(te.clientStatsHandler)) + for _, sh := range te.clientStatsHandlers { + opts = append(opts, grpc.WithStatsHandler(sh)) } var err error @@ -407,15 +408,17 @@ func (te *test) doServerStreamCall(c *rpcConfig) (*testpb.StreamingOutputCallReq } type expectedData struct { - method string - serverAddr string - compression string - reqIdx int - requests []proto.Message - respIdx int - responses []proto.Message - err error - failfast bool + method string + isClientStream bool + isServerStream bool + serverAddr string + compression string + reqIdx int + requests []proto.Message + respIdx int + responses []proto.Message + err error + failfast bool } type gotData struct { @@ -456,6 +459,12 @@ func checkBegin(t *testing.T, d *gotData, e *expectedData) { t.Fatalf("st.FailFast = %v, want %v", st.FailFast, e.failfast) } } + if st.IsClientStream != e.isClientStream { + t.Fatalf("st.IsClientStream = %v, want %v", st.IsClientStream, e.isClientStream) + } + if st.IsServerStream != e.isServerStream { + t.Fatalf("st.IsServerStream = %v, want %v", st.IsServerStream, e.isServerStream) + } } func checkInHeader(t *testing.T, d *gotData, e *expectedData) { @@ -559,9 +568,9 @@ func checkInPayload(t *testing.T, d *gotData, e *expectedData) { } // Below are sanity checks that WireLength and RecvTime are populated. // TODO: check values of WireLength and RecvTime. 
- if len(st.Data) > 0 && st.WireLength == 0 { + if len(st.Data) > 0 && st.CompressedLength == 0 { t.Fatalf("st.WireLength = %v with non-empty data, want ", - st.WireLength) + st.CompressedLength) } if st.RecvTime.IsZero() { t.Fatalf("st.ReceivedTime = %v, want ", st.RecvTime) @@ -745,7 +754,7 @@ func checkEnd(t *testing.T, d *gotData, e *expectedData) { } } -func checkConnBegin(t *testing.T, d *gotData, e *expectedData) { +func checkConnBegin(t *testing.T, d *gotData) { var ( ok bool st *stats.ConnBegin @@ -759,7 +768,7 @@ func checkConnBegin(t *testing.T, d *gotData, e *expectedData) { st.IsClient() // TODO remove this. } -func checkConnEnd(t *testing.T, d *gotData, e *expectedData) { +func checkConnEnd(t *testing.T, d *gotData) { var ( ok bool st *stats.ConnEnd @@ -807,9 +816,9 @@ func checkConnStats(t *testing.T, got []*gotData) { t.Fatalf("got %v stats, want even positive number", len(got)) } // The first conn stats must be a ConnBegin. - checkConnBegin(t, got[0], nil) + checkConnBegin(t, got[0]) // The last conn stats must be a ConnEnd. 
- checkConnEnd(t, got[len(got)-1], nil) + checkConnEnd(t, got[len(got)-1]) } func checkServerStats(t *testing.T, got []*gotData, expect *expectedData, checkFuncs []func(t *testing.T, d *gotData, e *expectedData)) { @@ -837,7 +846,7 @@ func checkServerStats(t *testing.T, got []*gotData, expect *expectedData, checkF func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []func(t *testing.T, d *gotData, e *expectedData)) { h := &statshandler{} - te := newTest(t, tc, nil, h) + te := newTest(t, tc, nil, []stats.Handler{h}) te.startServer(&testServer{}) defer te.tearDown() @@ -847,6 +856,9 @@ func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []f err error method string + isClientStream bool + isServerStream bool + req proto.Message resp proto.Message e error @@ -864,14 +876,18 @@ func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []f reqs, resp, e = te.doClientStreamCall(cc) resps = []proto.Message{resp} err = e + isClientStream = true case serverStreamRPC: method = "/grpc.testing.TestService/StreamingOutputCall" req, resps, e = te.doServerStreamCall(cc) reqs = []proto.Message{req} err = e + isServerStream = true case fullDuplexStreamRPC: method = "/grpc.testing.TestService/FullDuplexCall" reqs, resps, err = te.doFullDuplexCallRoundtrip(cc) + isClientStream = true + isServerStream = true } if cc.success != (err == nil) { t.Fatalf("cc.success: %v, got error: %v", cc.success, err) @@ -900,12 +916,14 @@ func testServerStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs []f } expect := &expectedData{ - serverAddr: te.srvAddr, - compression: tc.compress, - method: method, - requests: reqs, - responses: resps, - err: err, + serverAddr: te.srvAddr, + compression: tc.compress, + method: method, + requests: reqs, + responses: resps, + err: err, + isClientStream: isClientStream, + isServerStream: isServerStream, } h.mu.Lock() @@ -1128,7 +1146,7 @@ func checkClientStats(t *testing.T, got 
[]*gotData, expect *expectedData, checkF func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map[int]*checkFuncWithCount) { h := &statshandler{} - te := newTest(t, tc, h, nil) + te := newTest(t, tc, []stats.Handler{h}, nil) te.startServer(&testServer{}) defer te.tearDown() @@ -1138,6 +1156,9 @@ func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map method string err error + isClientStream bool + isServerStream bool + req proto.Message resp proto.Message e error @@ -1154,14 +1175,18 @@ func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map reqs, resp, e = te.doClientStreamCall(cc) resps = []proto.Message{resp} err = e + isClientStream = true case serverStreamRPC: method = "/grpc.testing.TestService/StreamingOutputCall" req, resps, e = te.doServerStreamCall(cc) reqs = []proto.Message{req} err = e + isServerStream = true case fullDuplexStreamRPC: method = "/grpc.testing.TestService/FullDuplexCall" reqs, resps, err = te.doFullDuplexCallRoundtrip(cc) + isClientStream = true + isServerStream = true } if cc.success != (err == nil) { t.Fatalf("cc.success: %v, got error: %v", cc.success, err) @@ -1194,13 +1219,15 @@ func testClientStats(t *testing.T, tc *testConfig, cc *rpcConfig, checkFuncs map } expect := &expectedData{ - serverAddr: te.srvAddr, - compression: tc.compress, - method: method, - requests: reqs, - responses: resps, - failfast: cc.failfast, - err: err, + serverAddr: te.srvAddr, + compression: tc.compress, + method: method, + requests: reqs, + responses: resps, + failfast: cc.failfast, + err: err, + isClientStream: isClientStream, + isServerStream: isServerStream, } h.mu.Lock() @@ -1348,3 +1375,95 @@ func (s) TestTrace(t *testing.T) { t.Errorf("OutgoingTrace(%v) = %v; want nil", ctx, tr) } } + +func (s) TestMultipleClientStatsHandler(t *testing.T) { + h := &statshandler{} + tc := &testConfig{compress: ""} + te := newTest(t, tc, []stats.Handler{h, h}, nil) + 
te.startServer(&testServer{}) + defer te.tearDown() + + cc := &rpcConfig{success: false, failfast: false, callType: unaryRPC} + _, _, err := te.doUnaryCall(cc) + if cc.success != (err == nil) { + t.Fatalf("cc.success: %v, got error: %v", cc.success, err) + } + te.cc.Close() + te.srv.GracefulStop() // Wait for the server to stop. + + for start := time.Now(); time.Since(start) < defaultTestTimeout; { + h.mu.Lock() + if _, ok := h.gotRPC[len(h.gotRPC)-1].s.(*stats.End); ok && len(h.gotRPC) == 12 { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + for start := time.Now(); time.Since(start) < defaultTestTimeout; { + h.mu.Lock() + if _, ok := h.gotConn[len(h.gotConn)-1].s.(*stats.ConnEnd); ok && len(h.gotConn) == 4 { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + // Each RPC generates 6 stats events on the client-side, times 2 StatsHandler + if len(h.gotRPC) != 12 { + t.Fatalf("h.gotRPC: unexpected amount of RPCStats: %v != %v", len(h.gotRPC), 12) + } + + // Each connection generates 4 conn events on the client-side, times 2 StatsHandler + if len(h.gotConn) != 4 { + t.Fatalf("h.gotConn: unexpected amount of ConnStats: %v != %v", len(h.gotConn), 4) + } +} + +func (s) TestMultipleServerStatsHandler(t *testing.T) { + h := &statshandler{} + tc := &testConfig{compress: ""} + te := newTest(t, tc, nil, []stats.Handler{h, h}) + te.startServer(&testServer{}) + defer te.tearDown() + + cc := &rpcConfig{success: false, failfast: false, callType: unaryRPC} + _, _, err := te.doUnaryCall(cc) + if cc.success != (err == nil) { + t.Fatalf("cc.success: %v, got error: %v", cc.success, err) + } + te.cc.Close() + te.srv.GracefulStop() // Wait for the server to stop. 
+ + for start := time.Now(); time.Since(start) < defaultTestTimeout; { + h.mu.Lock() + if _, ok := h.gotRPC[len(h.gotRPC)-1].s.(*stats.End); ok { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + for start := time.Now(); time.Since(start) < defaultTestTimeout; { + h.mu.Lock() + if _, ok := h.gotConn[len(h.gotConn)-1].s.(*stats.ConnEnd); ok { + h.mu.Unlock() + break + } + h.mu.Unlock() + time.Sleep(10 * time.Millisecond) + } + + // Each RPC generates 6 stats events on the server-side, times 2 StatsHandler + if len(h.gotRPC) != 12 { + t.Fatalf("h.gotRPC: unexpected amount of RPCStats: %v != %v", len(h.gotRPC), 12) + } + + // Each connection generates 4 conn events on the server-side, times 2 StatsHandler + if len(h.gotConn) != 4 { + t.Fatalf("h.gotConn: unexpected amount of ConnStats: %v != %v", len(h.gotConn), 4) + } +} diff --git a/status/status.go b/status/status.go index 54d187186b8f..bcf2e4d81beb 100644 --- a/status/status.go +++ b/status/status.go @@ -29,6 +29,7 @@ package status import ( "context" + "errors" "fmt" spb "google.golang.org/genproto/googleapis/rpc/status" @@ -73,19 +74,52 @@ func FromProto(s *spb.Status) *Status { return status.FromProto(s) } -// FromError returns a Status representing err if it was produced by this -// package or has a method `GRPCStatus() *Status`. -// If err is nil, a Status is returned with codes.OK and no message. -// Otherwise, ok is false and a Status is returned with codes.Unknown and -// the original error message. +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. 
+// +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus(), true + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + if gs.GRPCStatus() == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } + return gs.GRPCStatus(), true + } + var gs grpcstatus + if errors.As(err, &gs) { + if gs.GRPCStatus() == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := gs.GRPCStatus().Proto() + p.Message = err.Error() + return status.FromProto(p), true } return New(codes.Unknown, err.Error()), false } @@ -97,33 +131,30 @@ func Convert(err error) *Status { return s } -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. 
func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown + + return Convert(err).Code() } -// FromContextError converts a context error into a Status. It returns a -// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is -// non-nil and not a context error. +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. func FromContextError(err error) *Status { - switch err { - case nil: + if err == nil { return nil - case context.DeadlineExceeded: + } + if errors.Is(err, context.DeadlineExceeded) { return New(codes.DeadlineExceeded, err.Error()) - case context.Canceled: + } + if errors.Is(err, context.Canceled) { return New(codes.Canceled, err.Error()) - default: - return New(codes.Unknown, err.Error()) } + return New(codes.Unknown, err.Error()) } diff --git a/status/status_ext_test.go b/status/status_ext_test.go index 4c1efc56320f..33c8c71a0062 100644 --- a/status/status_ext_test.go +++ b/status/status_ext_test.go @@ -26,7 +26,8 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/status" - "google.golang.org/grpc/test/grpc_testing" + + testpb "google.golang.org/grpc/interop/grpc_testing" ) type s struct { @@ -49,7 +50,7 @@ func errWithDetails(t *testing.T, s *status.Status, details ...proto.Message) er func (s) TestErrorIs(t *testing.T) { // Test errors. 
testErr := status.Error(codes.Internal, "internal server error") - testErrWithDetails := errWithDetails(t, status.New(codes.Internal, "internal server error"), &grpc_testing.Empty{}) + testErrWithDetails := errWithDetails(t, status.New(codes.Internal, "internal server error"), &testpb.Empty{}) // Test cases. testCases := []struct { @@ -62,8 +63,8 @@ func (s) TestErrorIs(t *testing.T) { {err1: testErr, err2: status.Error(codes.Unknown, "internal server error"), want: false}, {err1: testErr, err2: errors.New("non-grpc error"), want: false}, {err1: testErrWithDetails, err2: status.Error(codes.Internal, "internal server error"), want: false}, - {err1: testErrWithDetails, err2: errWithDetails(t, status.New(codes.Internal, "internal server error"), &grpc_testing.Empty{}), want: true}, - {err1: testErrWithDetails, err2: errWithDetails(t, status.New(codes.Internal, "internal server error"), &grpc_testing.Empty{}, &grpc_testing.Empty{}), want: false}, + {err1: testErrWithDetails, err2: errWithDetails(t, status.New(codes.Internal, "internal server error"), &testpb.Empty{}), want: true}, + {err1: testErrWithDetails, err2: errWithDetails(t, status.New(codes.Internal, "internal server error"), &testpb.Empty{}, &testpb.Empty{}), want: false}, } for _, tc := range testCases { diff --git a/status/status_test.go b/status/status_test.go index 839a3c390ede..216d18bb27b9 100644 --- a/status/status_test.go +++ b/status/status_test.go @@ -32,6 +32,7 @@ import ( cpb "google.golang.org/genproto/googleapis/rpc/code" epb "google.golang.org/genproto/googleapis/rpc/errdetails" spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/status" @@ -192,6 +193,97 @@ func (s) TestFromErrorUnknownError(t *testing.T) { } } +func (s) TestFromErrorWrapped(t *testing.T) { + const code, message = codes.Internal, "test description" + err := fmt.Errorf("wrapped error: %w", Error(code, message)) + s, 
ok := FromError(err) + if !ok || s.Code() != code || s.Message() != err.Error() || s.Err() == nil { + t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, code, message) + } +} + +type customErrorNilStatus struct { +} + +func (c customErrorNilStatus) Error() string { + return "test" +} + +func (c customErrorNilStatus) GRPCStatus() *Status { + return nil +} + +func (s) TestFromErrorImplementsInterfaceReturnsOKStatus(t *testing.T) { + err := customErrorNilStatus{} + s, ok := FromError(err) + if ok || s.Code() != codes.Unknown || s.Message() != err.Error() { + t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, codes.Unknown, err.Error()) + } +} + +func (s) TestFromErrorImplementsInterfaceReturnsOKStatusWrapped(t *testing.T) { + err := fmt.Errorf("wrapping: %w", customErrorNilStatus{}) + s, ok := FromError(err) + if ok || s.Code() != codes.Unknown || s.Message() != err.Error() { + t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, codes.Unknown, err.Error()) + } +} + +func (s) TestFromErrorImplementsInterfaceWrapped(t *testing.T) { + const code, message = codes.Internal, "test description" + err := fmt.Errorf("wrapped error: %w", customError{Code: code, Message: message}) + s, ok := FromError(err) + if !ok || s.Code() != code || s.Message() != err.Error() || s.Err() == nil { + t.Fatalf("FromError(%v) = %v, %v; want , true", err, s, ok, code, message) + } +} + +func (s) TestCode(t *testing.T) { + const code = codes.Internal + err := Error(code, "test description") + if s := Code(err); s != code { + t.Fatalf("Code(%v) = %v; want ", err, s, code) + } +} + +func (s) TestCodeOK(t *testing.T) { + if s, code := Code(nil), codes.OK; s != code { + t.Fatalf("Code(%v) = %v; want ", nil, s, code) + } +} + +func (s) TestCodeImplementsInterface(t *testing.T) { + const code = codes.Internal + err := customError{Code: code, Message: "test description"} + if s := Code(err); s != code { + t.Fatalf("Code(%v) = %v; want ", err, s, code) + } +} + +func (s) 
TestCodeUnknownError(t *testing.T) { + const code = codes.Unknown + err := errors.New("unknown error") + if s := Code(err); s != code { + t.Fatalf("Code(%v) = %v; want ", err, s, code) + } +} + +func (s) TestCodeWrapped(t *testing.T) { + const code = codes.Internal + err := fmt.Errorf("wrapped: %w", Error(code, "test description")) + if s := Code(err); s != code { + t.Fatalf("Code(%v) = %v; want ", err, s, code) + } +} + +func (s) TestCodeImplementsInterfaceWrapped(t *testing.T) { + const code = codes.Internal + err := fmt.Errorf("wrapped: %w", customError{Code: code, Message: "test description"}) + if s := Code(err); s != code { + t.Fatalf("Code(%v) = %v; want ", err, s, code) + } +} + func (s) TestConvertKnownError(t *testing.T) { code, message := codes.Internal, "test description" err := Error(code, message) @@ -364,6 +456,8 @@ func (s) TestFromContextError(t *testing.T) { {in: context.DeadlineExceeded, want: New(codes.DeadlineExceeded, context.DeadlineExceeded.Error())}, {in: context.Canceled, want: New(codes.Canceled, context.Canceled.Error())}, {in: errors.New("other"), want: New(codes.Unknown, "other")}, + {in: fmt.Errorf("wrapped: %w", context.DeadlineExceeded), want: New(codes.DeadlineExceeded, "wrapped: "+context.DeadlineExceeded.Error())}, + {in: fmt.Errorf("wrapped: %w", context.Canceled), want: New(codes.Canceled, "wrapped: "+context.Canceled.Error())}, } for _, tc := range testCases { got := FromContextError(tc.in) diff --git a/stream.go b/stream.go index 77d25742cc3d..de32a7597145 100644 --- a/stream.go +++ b/stream.go @@ -36,8 +36,10 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" 
"google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -46,20 +48,28 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error -// StreamDesc represents a streaming RPC service's method specification. +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. type StreamDesc struct { - StreamName string - Handler StreamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends } // Stream defines the common interface a client or server stream has to satisfy. @@ -113,6 +123,9 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. 
It is also // not safe to call CloseSend concurrently with SendMsg. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On @@ -131,17 +144,22 @@ type ClientStream interface { // To ensure resources are not leaked due to the stream returned, one of the following // actions must be performed: // -// 1. Call Close on the ClientConn. -// 2. Cancel the context provided. -// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated -// client-streaming RPC, for instance, might use the helper function -// CloseAndRecv (note that CloseSend does not Recv, therefore is not -// guaranteed to release all resources). -// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. // // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. 
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return nil, err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -158,6 +176,20 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -181,6 +213,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } return nil, toRPCErr(err) } @@ -268,33 +307,6 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client if c.creds != nil { callHdr.Creds = c.creds } - var trInfo *traceInfo - if EnableTracing { - trInfo = &traceInfo{ - tr: trace.New("grpc.Sent."+methodFamily(method), method), - firstLine: firstLine{ - client: true, - }, - } - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = time.Until(deadline) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - } - ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp) - sh := cc.dopts.copts.StatsHandler - var beginTime time.Time - if sh != nil { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) - beginTime = time.Now() - begin := &stats.Begin{ - Client: true, - BeginTime: beginTime, - FailFast: c.failFast, - } - sh.HandleRPC(ctx, begin) - } cs := &clientStream{ callHdr: callHdr, @@ -308,29 +320,41 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cp: cp, comp: comp, cancel: cancel, - beginTime: beginTime, firstAttempt: true, onCommit: onCommit, } if !cc.dopts.disableRetry { cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) } - cs.binlog = binarylog.GetMethodLogger(method) - - // Only this initial attempt has stats/tracing. - // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. 
- if err := cs.newAttemptLocked(sh, trInfo); err != nil { - cs.finish(err) - return nil, err + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } } - op := func(a *csAttempt) error { return a.newStream() } + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { - cs.finish(err) return nil, err } - if cs.binlog != nil { + if len(cs.binlogs) != 0 { md, _ := metadata.FromOutgoingContext(ctx) logEntry := &binarylog.ClientHeader{ OnClientSide: true, @@ -344,7 +368,9 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client logEntry.Timeout = 0 } } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } if desc != unaryStreamDesc { @@ -365,63 +391,123 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client return cs, nil } -// newAttemptLocked creates a new attempt with a transport. -// If it succeeds, then it replaces clientStream's attempt with this new attempt. -func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) { - newAttempt := &csAttempt{ - cs: cs, - dc: cs.cc.dopts.dc, - statsHandler: sh, - trInfo: trInfo, +// newAttemptLocked creates a new csAttempt without a transport or stream. 
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) } - defer func() { - if retErr != nil { - // This attempt is not set in the clientStream, so it's finish won't - // be called. Call it here for stats and trace in case they are not - // nil. - newAttempt.finish(retErr) + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + var beginTime time.Time + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, } - }() + sh.HandleRPC(ctx, begin) + } - if err := cs.ctx.Err(); err != nil { - return toRPCErr(err) + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: trace.New("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) } - ctx := cs.ctx - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. 
- ctx = grpcutil.WithExtraMetadata(cs.ctx, metadata.Pairs( + ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype), )) } - t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method) + + return &csAttempt{ + ctx: ctx, + beginTime: beginTime, + cs: cs, + dc: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, + }, nil +} + +func (a *csAttempt) getTransport() error { + cs := a.cs + + var err error + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { + if de, ok := err.(dropError); ok { + err = de.error + a.drop = true + } return err } - if trInfo != nil { - trInfo.firstLine.SetRemoteAddr(t.RemoteAddr()) + if a.trInfo != nil { + a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) } - newAttempt.t = t - newAttempt.done = done - cs.attempt = newAttempt return nil } func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries - s, err := a.t.NewStream(cs.ctx, cs.callHdr) + + // Merge metadata stored in PickResult, if any, with existing call metadata. + // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it is local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metadata != nil { + // We currently do not have a function in the metadata package which + // merges given metadata with existing metadata in a context. Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. + // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. 
+ md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metadata) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { - if _, ok := err.(transport.PerformedIOError); ok { - // Return without converting to an RPC error so retry code can - // inspect. + nse, ok := err.(*transport.NewStreamError) + if !ok { + // Unexpected. return err } - return toRPCErr(err) + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) } - cs.attempt.s = s - cs.attempt.p = &parser{r: s} + a.s = s + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -439,8 +525,7 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream - beginTime time.Time + sentLast bool // sent an end stream methodConfig *MethodConfig @@ -448,7 +533,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlogs []binarylog.MethodLogger // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -480,11 +565,12 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult finished bool dc Decompressor @@ -497,7 +583,13 @@ type csAttempt struct { // and cleared when the finish method is called. 
trInfo *traceInfo - statsHandler stats.Handler + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool } func (cs *clientStream) commitAttemptLocked() { @@ -515,85 +607,76 @@ func (cs *clientStream) commitAttempt() { } // shouldRetry returns nil if the RPC should be retried; otherwise it returns -// the error that should be returned by the operation. -func (cs *clientStream) shouldRetry(err error) error { - unprocessed := false - if cs.attempt.s == nil { - pioErr, ok := err.(transport.PerformedIOError) - if ok { - // Unwrap error. - err = toRPCErr(pioErr.Err) - } else { - unprocessed = true - } - if !ok && !cs.callInfo.failFast { - // In the event of a non-IO operation error from NewStream, we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil - } +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs + + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. + return false, err } - if cs.finished || cs.committed { - // RPC is finished or committed; cannot retry. - return err + if a.s == nil && a.allowTransparentRetry { + return true, nil } // Wait for the trailers. - if cs.attempt.s != nil { - <-cs.attempt.s.Done() - unprocessed = cs.attempt.s.Unprocessed() + unprocessed := false + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. 
- return nil + return true, nil } if cs.cc.dopts.disableRetry { - return err + return false, err } pushback := 0 hasPushback := false - if cs.attempt.s != nil { - if !cs.attempt.s.TrailersOnly() { - return err + if a.s != nil { + if !a.s.TrailersOnly() { + return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.s.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } hasPushback = true } else if len(sps) > 1 { channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. - return err + return false, err } } var code codes.Code - if cs.attempt.s != nil { - code = cs.attempt.s.Status().Code() + if a.s != nil { + code = a.s.Status().Code() } else { - code = status.Convert(err).Code() + code = status.Code(err) } rp := cs.methodConfig.RetryPolicy if rp == nil || !rp.RetryableStatusCodes[code] { - return err + return false, err } // Note: the ordering here is important; we count this as a failure // only if the code matched a retryable code. 
if cs.retryThrottler.throttle() { - return err + return false, err } if cs.numRetries+1 >= rp.MaxAttempts { - return err + return false, err } var dur time.Duration @@ -616,26 +699,32 @@ func (cs *clientStream) shouldRetry(err error) error { select { case <-t.C: cs.numRetries++ - return nil + return false, nil case <-cs.ctx.Done(): t.Stop() - return status.FromContextError(cs.ctx.Err()).Err() + return false, status.FromContextError(cs.ctx.Err()).Err() } } // Returns nil if a retry was performed and succeeded; error otherwise. -func (cs *clientStream) retryLocked(lastErr error) error { +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { for { - cs.attempt.finish(lastErr) - if err := cs.shouldRetry(lastErr); err != nil { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { cs.commitAttemptLocked() return err } cs.firstAttempt = false - if err := cs.newAttemptLocked(nil, nil); err != nil { + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. return err } - if lastErr = cs.replayBufferLocked(); lastErr == nil { + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil } } @@ -645,7 +734,10 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. 
- return cs.attempt.s.Context() + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx } func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { @@ -653,7 +745,23 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) for { if cs.committed { cs.mu.Unlock() - return op(cs.attempt) + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) + } + if len(cs.buffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } } a := cs.attempt cs.mu.Unlock() @@ -671,7 +779,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) cs.mu.Unlock() return err } - if err := cs.retryLocked(err); err != nil { + if err := cs.retryLocked(a, err); err != nil { cs.mu.Unlock() return err } @@ -680,17 +788,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) + if err != nil { cs.finish(err) return nil, err } - if cs.binlog != nil && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. 
+ + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + // Only log if binary log is on and header has not been logged, and + // there are actually headers to log. logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, @@ -699,10 +815,12 @@ func (cs *clientStream) Header() (metadata.MD, error) { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) cs.serverHeaderBinlogged = true + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } - return m, err + return m, nil } func (cs *clientStream) Trailer() metadata.MD { @@ -720,10 +838,9 @@ func (cs *clientStream) Trailer() metadata.MD { return cs.attempt.s.Trailer() } -func (cs *clientStream) replayBufferLocked() error { - a := cs.attempt +func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { for _, f := range cs.buffer { - if err := f(a); err != nil { + if err := f(attempt); err != nil { return err } } @@ -771,47 +888,48 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { if len(payload) > *cs.callInfo.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) } - msgBytes := data // Store the pointer before setting to nil. For binary logging. op := func(a *csAttempt) error { - err := a.sendMsg(m, hdr, payload, data) - // nil out the message and uncomp when replaying; they are only needed for - // stats which is disabled for subsequent attempts. 
- m, data = nil, nil - return err + return a.sendMsg(m, hdr, payload, data) } err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ClientMessage{ + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: msgBytes, - }) + Message: data, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, cm) + } } - return + return err } func (cs *clientStream) RecvMsg(m interface{}) error { - if cs.binlog != nil && !cs.serverHeaderBinlogged { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() } var recvInfo *payloadInfo - if cs.binlog != nil { + if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) }, cs.commitAttemptLocked) - if cs.binlog != nil && err == nil { - cs.binlog.Log(&binarylog.ServerMessage{ + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ OnClientSide: true, Message: recvInfo.uncompressedBytes, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, sm) + } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - if cs.binlog != nil { + if len(cs.binlogs) != 0 { // finish will not log Trailer. Log Trailer here. 
logEntry := &binarylog.ServerTrailer{ OnClientSide: true, @@ -824,7 +942,9 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if peer, ok := peer.FromContext(cs.Context()); ok { logEntry.PeerAddr = peer.Addr } - cs.binlog.Log(logEntry) + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } return err @@ -845,10 +965,13 @@ func (cs *clientStream) CloseSend() error { return nil } cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) - if cs.binlog != nil { - cs.binlog.Log(&binarylog.ClientHalfClose{ + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, chc) + } } // We never returned an error here for reasons. return nil @@ -865,6 +988,9 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) @@ -881,10 +1007,13 @@ func (cs *clientStream) finish(err error) { // // Only one of cancel or trailer needs to be logged. In the cases where // users don't call RecvMsg, users must have already canceled the RPC. 
- if cs.binlog != nil && status.Code(err) == codes.Canceled { - cs.binlog.Log(&binarylog.Cancel{ + if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { + c := &binarylog.Cancel{ OnClientSide: true, - }) + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } } if err == nil { cs.retryThrottler.successfulRPC() @@ -917,8 +1046,8 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { } return io.EOF } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) } if channelz.IsOn() { a.t.IncrMsgSent() @@ -928,7 +1057,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { cs := a.cs - if a.statsHandler != nil && payInfo == nil { + if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} } @@ -956,6 +1085,7 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } return io.EOF // indicates successful end of stream. } + return toRPCErr(err) } if a.trInfo != nil { @@ -965,15 +1095,16 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { } a.mu.Unlock() } - if a.statsHandler != nil { - a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ Client: true, RecvTime: time.Now(), Payload: m, // TODO truncate large payload. 
- Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -1012,12 +1143,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.done != nil { + if a.pickResult.Done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.done(balancer.DoneInfo{ + a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, @@ -1025,15 +1156,15 @@ func (a *csAttempt) finish(err error) { ServerLoad: balancerload.Parse(tr), }) } - if a.statsHandler != nil { + for _, sh := range a.statsHandlers { end := &stats.End{ Client: true, - BeginTime: a.cs.beginTime, + BeginTime: a.beginTime, EndTime: time.Now(), Trailer: tr, Error: err, } - a.statsHandler.HandleRPC(a.cs.ctx, end) + sh.HandleRPC(a.ctx, end) } if a.trInfo != nil && a.trInfo.tr != nil { if err == nil { @@ -1139,17 +1270,22 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. 
In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1338,8 +1474,10 @@ func (as *addrConnStream) finish(err error) { // ServerStream defines the server-side behavior of a streaming RPC. // -// All errors returned from ServerStream methods are compatible with the -// status package. +// Errors returned from ServerStream methods are compatible with the status +// package. However, the status code will often not match the RPC status as +// seen by the client application, and therefore, should not be relied upon for +// this purpose. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. @@ -1371,6 +1509,9 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. 
On @@ -1396,13 +1537,15 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor + sendCompressorName string + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo - statsHandler stats.Handler + statsHandler []stats.Handler - binlog *binarylog.MethodLogger + binlogs []binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). @@ -1422,17 +1565,29 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) - if ss.binlog != nil && !ss.serverHeaderBinlogged { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } } return err } @@ -1441,6 +1596,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } @@ -1473,6 +1631,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. 
+ if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { @@ -1486,20 +1651,28 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } - if ss.binlog != nil { + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() - ss.binlog.Log(&binarylog.ServerHeader{ + sh := &binarylog.ServerHeader{ Header: h, - }) + } ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } } - ss.binlog.Log(&binarylog.ServerMessage{ + sm := &binarylog.ServerMessage{ Message: data, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sm) + } } - if ss.statsHandler != nil { - ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } } return nil } @@ -1533,13 +1706,16 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } }() var payInfo *payloadInfo - if ss.statsHandler != nil || ss.binlog != nil { + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} } if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { if err == io.EOF { - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientHalfClose{}) + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, chc) + } } return err } @@ -1548,20 +1724,26 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { } return toRPCErr(err) } - if ss.statsHandler 
!= nil { - ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), - }) + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } } - if ss.binlog != nil { - ss.binlog.Log(&binarylog.ClientMessage{ + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ Message: payInfo.uncompressedBytes, - }) + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, cm) + } } return nil } diff --git a/stress/client/main.go b/stress/client/main.go index 37e2a38f42a2..ef3db7c13864 100644 --- a/stress/client/main.go +++ b/stress/client/main.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/interop" "google.golang.org/grpc/status" @@ -286,14 +287,14 @@ func newConn(address string, useTLS, testCA bool, tlsServerName string) (*grpc.C } creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { - logger.Fatalf("Failed to create TLS credentials %v", err) + logger.Fatalf("Failed to create TLS credentials: %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } return grpc.Dial(address, opts...) 
} diff --git a/stress/grpc_testing/metrics.pb.go b/stress/grpc_testing/metrics.pb.go index f8e95188fec0..83d76795fa74 100644 --- a/stress/grpc_testing/metrics.pb.go +++ b/stress/grpc_testing/metrics.pb.go @@ -21,14 +21,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: stress/grpc_testing/metrics.proto package grpc_testing import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -42,10 +41,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Response message containing the gauge name and value type GaugeResponse struct { state protoimpl.MessageState @@ -54,6 +49,7 @@ type GaugeResponse struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Types that are assignable to Value: + // // *GaugeResponse_LongValue // *GaugeResponse_DoubleValue // *GaugeResponse_StringValue diff --git a/stress/grpc_testing/metrics_grpc.pb.go b/stress/grpc_testing/metrics_grpc.pb.go index 2ece03255630..4e2f985bdf16 100644 --- a/stress/grpc_testing/metrics_grpc.pb.go +++ b/stress/grpc_testing/metrics_grpc.pb.go @@ -1,4 +1,29 @@ +// Copyright 2015-2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Contains the definitions for a metrics service and the type of metrics +// exposed by the service. +// +// Currently, 'Gauge' (i.e a metric that represents the measured value of +// something at an instant of time) is the only metric type supported by the +// service. + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.0 +// source: stress/grpc_testing/metrics.proto package grpc_testing @@ -14,6 +39,11 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + MetricsService_GetAllGauges_FullMethodName = "/grpc.testing.MetricsService/GetAllGauges" + MetricsService_GetGauge_FullMethodName = "/grpc.testing.MetricsService/GetGauge" +) + // MetricsServiceClient is the client API for MetricsService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -34,7 +64,7 @@ func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient { } func (c *metricsServiceClient) GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) { - stream, err := c.cc.NewStream(ctx, &MetricsService_ServiceDesc.Streams[0], "/grpc.testing.MetricsService/GetAllGauges", opts...) + stream, err := c.cc.NewStream(ctx, &MetricsService_ServiceDesc.Streams[0], MetricsService_GetAllGauges_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -67,7 +97,7 @@ func (x *metricsServiceGetAllGaugesClient) Recv() (*GaugeResponse, error) { func (c *metricsServiceClient) GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) { out := new(GaugeResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.MetricsService/GetGauge", in, out, opts...) + err := c.cc.Invoke(ctx, MetricsService_GetGauge_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -140,7 +170,7 @@ func _MetricsService_GetGauge_Handler(srv interface{}, ctx context.Context, dec } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/grpc.testing.MetricsService/GetGauge", + FullMethod: MetricsService_GetGauge_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(MetricsServiceServer).GetGauge(ctx, req.(*GaugeRequest)) diff --git a/stress/metrics_client/main.go b/stress/metrics_client/main.go index ad6db6dd7a19..8948f868dbf3 100644 --- a/stress/metrics_client/main.go +++ b/stress/metrics_client/main.go @@ -26,6 +26,7 @@ import ( "io" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/grpclog" metricspb "google.golang.org/grpc/stress/grpc_testing" ) @@ -71,10 +72,10 @@ func printMetrics(client metricspb.MetricsServiceClient, totalOnly bool) { func main() { flag.Parse() if *metricsServerAddress == "" { - logger.Fatalf("Metrics server address is empty.") + logger.Fatal("-metrics_server_address is unset") } - conn, err := grpc.Dial(*metricsServerAddress, grpc.WithInsecure()) + conn, err := grpc.Dial(*metricsServerAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { logger.Fatalf("cannot connect to metrics server: %v", err) } diff --git a/tap/tap.go b/tap/tap.go index caea1ebed6e3..bfa5dfa40e4d 100644 --- a/tap/tap.go +++ b/tap/tap.go @@ -19,7 +19,7 @@ // Package tap defines the function handles which are executed on the transport // 
layer of gRPC-Go and related information. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -37,16 +37,16 @@ type Info struct { // TODO: More to be added. } -// ServerInHandle defines the function which runs before a new stream is created -// on the server side. If it returns a non-nil error, the stream will not be -// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. -// The client will receive an RPC error "code = Unavailable, desc = stream -// terminated by RST_STREAM with error code: REFUSED_STREAM". +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. // // It's intended to be used in situations where you don't want to waste the -// resources to accept the new stream (e.g. rate-limiting). And the content of -// the error will be ignored and won't be sent back to the client. For other -// general usages, please use interceptors. +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. // // Note that it is executed in the per-connection I/O goroutine(s) instead of // per-RPC goroutine. 
Therefore, users should NOT have any diff --git a/test/authority_test.go b/test/authority_test.go index 17ae178b73c9..a4d481f24f92 100644 --- a/test/authority_test.go +++ b/test/authority_test.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux /* @@ -32,10 +33,15 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func authorityChecker(ctx context.Context, expectedAuthority string) (*testpb.Empty, error) { @@ -96,36 +102,39 @@ type authorityTest struct { var authorityTests = []authorityTest{ { - name: "UnixRelative", - address: "sock.sock", - target: "unix:sock.sock", - authority: "localhost", + name: "UnixRelative", + address: "sock.sock", + target: "unix:sock.sock", + authority: "localhost", + dialTargetWant: "unix:sock.sock", }, { - name: "UnixAbsolute", - address: "/tmp/sock.sock", - target: "unix:/tmp/sock.sock", - authority: "localhost", + name: "UnixAbsolute", + address: "/tmp/sock.sock", + target: "unix:/tmp/sock.sock", + authority: "localhost", + dialTargetWant: "unix:///tmp/sock.sock", }, { - name: "UnixAbsoluteAlternate", - address: "/tmp/sock.sock", - target: "unix:///tmp/sock.sock", - authority: "localhost", + name: "UnixAbsoluteAlternate", + address: "/tmp/sock.sock", + target: "unix:///tmp/sock.sock", + authority: "localhost", + dialTargetWant: "unix:///tmp/sock.sock", }, { name: "UnixPassthrough", address: "/tmp/sock.sock", target: "passthrough:///unix:///tmp/sock.sock", - authority: "unix:///tmp/sock.sock", + authority: "unix:%2F%2F%2Ftmp%2Fsock.sock", dialTargetWant: "unix:///tmp/sock.sock", }, { name: "UnixAbstract", - address: 
"\x00abc efg", + address: "@abc efg", target: "unix-abstract:abc efg", authority: "localhost", - dialTargetWant: "\x00abc efg", + dialTargetWant: "unix:@abc efg", }, } @@ -145,16 +154,11 @@ func (s) TestUnix(t *testing.T) { func (s) TestUnixCustomDialer(t *testing.T) { for _, test := range authorityTests { t.Run(test.name+"WithDialer", func(t *testing.T) { - if test.dialTargetWant == "" { - test.dialTargetWant = test.target - } dialer := func(ctx context.Context, address string) (net.Conn, error) { if address != test.dialTargetWant { return nil, fmt.Errorf("expected target %v in custom dialer, instead got %v", test.dialTargetWant, address) } - if !strings.HasPrefix(test.target, "unix-abstract:") { - address = address[len("unix:"):] - } + address = address[len("unix:"):] return (&net.Dialer{}).DialContext(ctx, "unix", address) } runUnixTest(t, test.address, test.target, test.authority, dialer) @@ -191,7 +195,7 @@ func (s) TestColonPortAuthority(t *testing.T) { // // Append "localhost" before calling net.Dial, in case net.Dial on certain // platforms doesn't work well for address without the IP. 
- cc, err := grpc.Dial(":"+port, grpc.WithInsecure(), grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + cc, err := grpc.Dial(":"+port, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { return (&net.Dialer{}).DialContext(ctx, "tcp", "localhost"+addr) })) if err != nil { @@ -200,8 +204,40 @@ defer cc.Close() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - _, err = testpb.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}) + _, err = testgrpc.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}) if err != nil { t.Errorf("us.client.EmptyCall(_, _) = _, %v; want _, nil", err) } } + +// TestAuthorityReplacedWithResolverAddress tests the scenario where the resolver +// returned address contains a ServerName override. The test verifies that the +// :authority header value sent to the server as part of the http/2 HEADERS frame +// is set to the value specified in the resolver returned address. 
+func (s) TestAuthorityReplacedWithResolverAddress(t *testing.T) { + const expectedAuthority = "test.server.name" + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + return authorityChecker(ctx, expectedAuthority) + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + r := manual.NewBuilderWithScheme("whatever") + r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: ss.Address, ServerName: expectedAuthority}}}) + cc, err := grpc.Dial(r.Scheme()+":///whatever", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial(%q) = %v", ss.Address, err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err = testgrpc.NewTestServiceClient(cc).EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() rpc failed: %v", err) + } +} diff --git a/test/balancer_switching_test.go b/test/balancer_switching_test.go new file mode 100644 index 000000000000..716625a63b3f --- /dev/null +++ b/test/balancer_switching_test.go @@ -0,0 +1,617 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils/fakegrpclb" + "google.golang.org/grpc/internal/testutils/pickfirst" + rrutil "google.golang.org/grpc/internal/testutils/roundrobin" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +const ( + loadBalancedServiceName = "foo.bar.service" + loadBalancedServicePort = 443 + wantGRPCLBTraceDesc = `Channel switches to new LB policy "grpclb"` + wantRoundRobinTraceDesc = `Channel switches to new LB policy "round_robin"` + + // This is the number of stub backends set up at the start of each test. The + // first backend is used for the "grpclb" policy and the rest are used for + // other LB policies to test balancer switching. + backendCount = 3 +) + +// setupBackendsAndFakeGRPCLB sets up backendCount number of stub server +// backends and a fake grpclb server for tests which exercise balancer switch +// scenarios involving grpclb. +// +// The fake grpclb server always returns the first of the configured stub +// backends as backend addresses. So, the tests are free to use the other +// backends with other LB policies to verify balancer switching scenarios. +// +// Returns a cleanup function to be invoked by the caller. 
+func setupBackendsAndFakeGRPCLB(t *testing.T) ([]*stubserver.StubServer, *fakegrpclb.Server, func()) { + czCleanup := channelz.NewChannelzStorageForTesting() + backends, backendsCleanup := startBackendsForBalancerSwitch(t) + + lbServer, err := fakegrpclb.NewServer(fakegrpclb.ServerParams{ + LoadBalancedServiceName: loadBalancedServiceName, + LoadBalancedServicePort: loadBalancedServicePort, + BackendAddresses: []string{backends[0].Address}, + }) + if err != nil { + t.Fatalf("failed to create fake grpclb server: %v", err) + } + go func() { + if err := lbServer.Serve(); err != nil { + t.Errorf("fake grpclb Serve() failed: %v", err) + } + }() + + return backends, lbServer, func() { + backendsCleanup() + lbServer.Stop() + czCleanupWrapper(czCleanup, t) + } +} + +// startBackendsForBalancerSwitch spins up a bunch of stub server backends +// exposing the TestService. Returns a cleanup function to be invoked by the +// caller. +func startBackendsForBalancerSwitch(t *testing.T) ([]*stubserver.StubServer, func()) { + t.Helper() + + backends := make([]*stubserver.StubServer, backendCount) + for i := 0; i < backendCount; i++ { + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + backends[i] = backend + } + return backends, func() { + for _, b := range backends { + b.Stop() + } + } +} + +// TestBalancerSwitch_Basic tests the basic scenario of switching from one LB +// policy to another, as specified in the service config. 
+func (s) TestBalancerSwitch_Basic(t *testing.T) { + backends, cleanup := startBackendsForBalancerSwitch(t) + defer cleanup() + addrs := stubBackendsToResolverAddrs(backends) + + r := manual.NewBuilderWithScheme("whatever") + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update without an LB policy in the service config. The + // channel should pick the default LB policy, which is pick_first. + r.UpdateState(resolver.State{Addresses: addrs}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Push a resolver update with the service config specifying "round_robin". + r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), + }) + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil { + t.Fatal(err) + } + + // Push a resolver update with the service config specifying "pick_first". + r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: parseServiceConfig(t, r, pickFirstServiceConfig), + }) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_grpclbToPickFirst tests the scenario where the channel +// starts off "grpclb", switches to "pick_first" and back. 
+func (s) TestBalancerSwitch_grpclbToPickFirst(t *testing.T) { + backends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t) + defer cleanup() + + addrs := stubBackendsToResolverAddrs(backends) + r := manual.NewBuilderWithScheme("whatever") + target := fmt.Sprintf("%s:///%s", r.Scheme(), loadBalancedServiceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update with no service config and a single address pointing + // to the grpclb server we created above. This will cause the channel to + // switch to the "grpclb" balancer, which returns a single backend address. + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[0:1]); err != nil { + t.Fatal(err) + } + + // Push a resolver update containing a non-existent grpclb server address. + // This should not lead to a balancer switch. + const nonExistentServer = "non-existent-grpclb-server-address" + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: nonExistentServer, Type: resolver.GRPCLB}}}) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { + t.Fatal(err) + } + + // Push a resolver update containing no grpclb server address. This should + // lead to the channel using the default LB policy which is pick_first. The + // list of addresses pushed as part of this update is different from the one + // returned by the "grpclb" balancer. So, we should see RPCs going to the + // newly configured backends, as part of the balancer switch. 
+ r.UpdateState(resolver.State{Addresses: addrs[1:]}) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_pickFirstToGRPCLB tests the scenario where the channel +// starts off with "pick_first", switches to "grpclb" and back. +func (s) TestBalancerSwitch_pickFirstToGRPCLB(t *testing.T) { + backends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t) + defer cleanup() + + addrs := stubBackendsToResolverAddrs(backends) + r := manual.NewBuilderWithScheme("whatever") + target := fmt.Sprintf("%s:///%s", r.Scheme(), loadBalancedServiceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update containing no grpclb server address. This should + // lead to the channel using the default LB policy which is pick_first. + r.UpdateState(resolver.State{Addresses: addrs[1:]}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + + // Push a resolver update with no service config and a single address pointing + // to the grpclb server we created above. This will cause the channel to + // switch to the "grpclb" balancer, which returns a single backend address. + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { + t.Fatal(err) + } + + // Push a resolver update containing a non-existent grpclb server address. + // This should not lead to a balancer switch. 
+ r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "nonExistentServer", Type: resolver.GRPCLB}}}) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { + t.Fatal(err) + } + + // Switch to "pick_first" again by sending no grpclb server addresses. + r.UpdateState(resolver.State{Addresses: addrs[1:]}) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_RoundRobinToGRPCLB tests the scenario where the channel +// starts off with "round_robin", switches to "grpclb" and back. +// +// Note that this test uses the deprecated `loadBalancingPolicy` field in the +// service config. +func (s) TestBalancerSwitch_RoundRobinToGRPCLB(t *testing.T) { + backends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t) + defer cleanup() + + addrs := stubBackendsToResolverAddrs(backends) + r := manual.NewBuilderWithScheme("whatever") + target := fmt.Sprintf("%s:///%s", r.Scheme(), loadBalancedServiceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Note the use of the deprecated `loadBalancingPolicy` field here instead + // of the now recommended `loadBalancingConfig` field. The logic in the + // ClientConn which decides which balancer to switch to looks at the + // following places in the given order of preference: + // - `loadBalancingConfig` field + // - addresses of type grpclb + // - `loadBalancingPolicy` field + // If we use the `loadBalancingPolicy` field, the switch to "grpclb" later on + // in the test will not happen as the ClientConn will continue to use the LB + // policy received in the first update. + scpr := parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`) + + // Push a resolver update with the service config specifying "round_robin". 
+ r.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: scpr}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { + t.Fatal(err) + } + + // Push a resolver update with no service config and a single address pointing + // to the grpclb server we created above. This will cause the channel to + // switch to the "grpclb" balancer, which returns a single backend address. + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}, + ServiceConfig: scpr, + }) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { + t.Fatal(err) + } + + // Switch back to "round_robin". + r.UpdateState(resolver.State{Addresses: addrs[1:], ServiceConfig: scpr}) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_grpclbNotRegistered tests the scenario where the grpclb +// balancer is not registered. Verifies that the ClientConn fallbacks to the +// default LB policy or the LB policy specified in the service config, and that +// addresses of type "grpclb" are filtered out. +func (s) TestBalancerSwitch_grpclbNotRegistered(t *testing.T) { + // Unregister the grpclb balancer builder for the duration of this test. 
+ grpclbBuilder := balancer.Get("grpclb") + internal.BalancerUnregister(grpclbBuilder.Name()) + defer balancer.Register(grpclbBuilder) + + backends, cleanup := startBackendsForBalancerSwitch(t) + defer cleanup() + addrs := stubBackendsToResolverAddrs(backends) + + r := manual.NewBuilderWithScheme("whatever") + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update which contains a bunch of stub server backends and a + // grpclb server address. The latter should get the ClientConn to try and + // apply the grpclb policy. But since grpclb is not registered, it should + // fallback to the default LB policy which is pick_first. The ClientConn is + // also expected to filter out the grpclb address when sending the addresses + // list to pick_first. + grpclbAddr := []resolver.Address{{Addr: "non-existent-grpclb-server-address", Type: resolver.GRPCLB}} + addrs = append(grpclbAddr, addrs...) + r.UpdateState(resolver.State{Addresses: addrs}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + + // Push a resolver update with the same addresses, but with a service config + // specifying "round_robin". The ClientConn is expected to filter out the + // grpclb address when sending the addresses list to round_robin. 
+ r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), + }) + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_grpclbAddressOverridesLoadBalancingPolicy verifies that +// if the resolver update contains any addresses of type "grpclb", it overrides +// the LB policy specifies in the deprecated `loadBalancingPolicy` field of the +// service config. +func (s) TestBalancerSwitch_grpclbAddressOverridesLoadBalancingPolicy(t *testing.T) { + backends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t) + defer cleanup() + + addrs := stubBackendsToResolverAddrs(backends) + r := manual.NewBuilderWithScheme("whatever") + target := fmt.Sprintf("%s:///%s", r.Scheme(), loadBalancedServiceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update containing no grpclb server address. This should + // lead to the channel using the default LB policy which is pick_first. + r.UpdateState(resolver.State{Addresses: addrs[1:]}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } + + // Push a resolver update with no service config. The addresses list contains + // the stub backend addresses and a single address pointing to the grpclb + // server we created above. This will cause the channel to switch to the + // "grpclb" balancer, which returns a single backend address. 
+ r.UpdateState(resolver.State{ + Addresses: append(addrs[1:], resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), + }) + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { + t.Fatal(err) + } + + // Push a resolver update with a service config using the deprecated + // `loadBalancingPolicy` field pointing to round_robin. The addresses list + // contains an address of type "grpclb". This should be preferred and hence + // there should be no balancer switch. + scpr := parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`) + r.UpdateState(resolver.State{ + Addresses: append(addrs[1:], resolver.Address{Addr: lbServer.Address(), Type: resolver.GRPCLB}), + ServiceConfig: scpr, + }) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { + t.Fatal(err) + } + + // Switch to "round_robin" by removing the address of type "grpclb". + r.UpdateState(resolver.State{Addresses: addrs[1:]}) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_LoadBalancingConfigTrumps verifies that the +// `loadBalancingConfig` field in the service config trumps over addresses of +// type "grpclb" when it comes to deciding which LB policy is applied on the +// channel. +func (s) TestBalancerSwitch_LoadBalancingConfigTrumps(t *testing.T) { + backends, lbServer, cleanup := setupBackendsAndFakeGRPCLB(t) + defer cleanup() + + addrs := stubBackendsToResolverAddrs(backends) + r := manual.NewBuilderWithScheme("whatever") + target := fmt.Sprintf("%s:///%s", r.Scheme(), loadBalancedServiceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update with no service config and a single address pointing + // to the grpclb server we created above. 
This will cause the channel to + // switch to the "grpclb" balancer, which returns a single backend address. + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lbServer.Address(), Type: resolver.GRPCLB}}}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[:1]); err != nil { + t.Fatal(err) + } + + // Push a resolver update with the service config specifying "round_robin" + // through the recommended `loadBalancingConfig` field. + r.UpdateState(resolver.State{ + Addresses: addrs[1:], + ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), + }) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { + t.Fatal(err) + } + + // Push a resolver update with no service config and an address of type + // "grpclb". The ClientConn should continue to use the service config received + // earlier, which specified the use of "round_robin" through the + // `loadBalancingConfig` field, and therefore the balancer should not be + // switched. And because the `loadBalancingConfig` field trumps everything + // else, the address of type "grpclb" should be ignored. + grpclbAddr := resolver.Address{Addr: "non-existent-grpclb-server-address", Type: resolver.GRPCLB} + r.UpdateState(resolver.State{Addresses: append(addrs[1:], grpclbAddr)}) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { + t.Fatal(err) + } +} + +// TestBalancerSwitch_OldBalancerCallsRemoveSubConnInClose tests the scenario +// where the balancer being switched out calls RemoveSubConn() in its Close() +// method. Verifies that this sequence of calls doesn't lead to a deadlock. +func (s) TestBalancerSwitch_OldBalancerCallsRemoveSubConnInClose(t *testing.T) { + // Register a stub balancer which calls RemoveSubConn() from its Close(). 
+ scChan := make(chan balancer.SubConn, 1) + uccsCalled := make(chan struct{}, 1) + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(data *stub.BalancerData, ccs balancer.ClientConnState) error { + sc, err := data.ClientConn.NewSubConn(ccs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("failed to create subConn: %v", err) + } + scChan <- sc + close(uccsCalled) + return nil + }, + Close: func(data *stub.BalancerData) { + data.ClientConn.RemoveSubConn(<-scChan) + }, + }) + + r := manual.NewBuilderWithScheme("whatever") + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update specifying our stub balancer as the LB policy. + scpr := parseServiceConfig(t, r, fmt.Sprintf(`{"loadBalancingPolicy": "%v"}`, t.Name())) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "dummy-address"}}, + ServiceConfig: scpr, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for UpdateClientConnState to be called: %v", ctx.Err()) + case <-uccsCalled: + } + + // The following service config update will switch balancer from our stub + // balancer to pick_first. The former will be closed, which will call + // cc.RemoveSubConn() inline (this RemoveSubConn is not required by the API, + // but some balancers might do it). + // + // This is to make sure the cc.RemoveSubConn() from Close() doesn't cause a + // deadlock (e.g. trying to grab a mutex while it's already locked). + // + // Do it in a goroutine so this test will fail with a helpful message + // (though the goroutine will still leak). 
+ done := make(chan struct{}) + go func() { + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "dummy-address"}}, + ServiceConfig: parseServiceConfig(t, r, pickFirstServiceConfig), + }) + close(done) + }() + + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for resolver.UpdateState to finish: %v", ctx.Err()) + case <-done: + } +} + +// TestBalancerSwitch_Graceful tests the graceful switching of LB policies. It +// starts off by configuring "round_robin" on the channel and ensures that RPCs +// are successful. Then, it switches to a stub balancer which does not report a +// picker until instructed by the test to do so. At this point, the test +// verifies that RPCs are still successful using the old balancer. Then the test +// asks the new balancer to report a healthy picker and the test verifies that +// the RPCs get routed using the picker reported by the new balancer. +func (s) TestBalancerSwitch_Graceful(t *testing.T) { + backends, cleanup := startBackendsForBalancerSwitch(t) + defer cleanup() + addrs := stubBackendsToResolverAddrs(backends) + + r := manual.NewBuilderWithScheme("whatever") + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + + // Push a resolver update with the service config specifying "round_robin". + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + r.UpdateState(resolver.State{ + Addresses: addrs[1:], + ServiceConfig: parseServiceConfig(t, r, rrServiceConfig), + }) + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { + t.Fatal(err) + } + + // Register a stub balancer which uses a "pick_first" balancer underneath and + // signals on a channel when it receives ClientConn updates.
But it does not + // forward the ccUpdate to the underlying "pick_first" balancer until the test + // asks it to do so. This allows us to test the graceful switch functionality. + // Until the test asks the stub balancer to forward the ccUpdate, RPCs should + // get routed to the old balancer. And once the test gives the go ahead, RPCs + // should get routed to the new balancer. + ccUpdateCh := make(chan struct{}) + waitToProceed := make(chan struct{}) + stub.Register(t.Name(), stub.BalancerFuncs{ + Init: func(bd *stub.BalancerData) { + pf := balancer.Get(grpc.PickFirstBalancerName) + bd.Data = pf.Build(bd.ClientConn, bd.BuildOptions) + }, + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + bal := bd.Data.(balancer.Balancer) + close(ccUpdateCh) + go func() { + <-waitToProceed + bal.UpdateClientConnState(ccs) + }() + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + bal := bd.Data.(balancer.Balancer) + bal.UpdateSubConnState(sc, state) + }, + }) + + // Push a resolver update with the service config specifying our stub + // balancer. We should see a trace event for this balancer switch. But RPCs + // should still be routed to the old balancer since our stub balancer does not + // report a ready picker until we ask it to do so. + r.UpdateState(resolver.State{ + Addresses: addrs[:1], + ServiceConfig: r.CC.ParseServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%v": {}}]}`, t.Name())), + }) + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for a ClientConnState update on the new balancer") + case <-ccUpdateCh: + } + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs[1:]); err != nil { + t.Fatal(err) + } + + // Ask our stub balancer to forward the earlier received ccUpdate to the + // underlying "pick_first" balancer which will result in a healthy picker + // being reported to the channel. RPCs should start using the new balancer. 
+ close(waitToProceed) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } +} diff --git a/test/balancer_test.go b/test/balancer_test.go index bc22036dbac3..4026c75b46e3 100644 --- a/test/balancer_test.go +++ b/test/balancer_test.go @@ -28,13 +28,14 @@ import ( "time" "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/balancer/stub" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/grpcsync" @@ -46,8 +47,10 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) const testBalancerName = "testbalancer" @@ -88,7 +91,7 @@ func (b *testBalancer) UpdateClientConnState(state balancer.ClientConnState) err logger.Errorf("testBalancer: failed to NewSubConn: %v", err) return nil } - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: &picker{sc: b.sc, bal: b}}) + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable, bal: b}}) b.sc.Connect() } return nil @@ -106,8 +109,10 @@ func (b *testBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubCon } switch s.ConnectivityState { - case connectivity.Ready, connectivity.Idle: + case connectivity.Ready: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{sc: sc, bal: b}}) + case connectivity.Idle: + 
b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{sc: sc, bal: b, idle: true}}) case connectivity.Connecting: b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable, bal: b}}) case connectivity.TransientFailure: @@ -117,16 +122,23 @@ func (b *testBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubCon func (b *testBalancer) Close() {} +func (b *testBalancer) ExitIdle() {} + type picker struct { - err error - sc balancer.SubConn - bal *testBalancer + err error + sc balancer.SubConn + bal *testBalancer + idle bool } func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { if p.err != nil { return balancer.PickResult{}, p.err } + if p.idle { + p.sc.Connect() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } extraMD, _ := grpcutil.ExtraMetadata(info.Ctx) info.Ctx = nil // Do not validate context. p.bal.pickInfos = append(p.bal.pickInfos, info) @@ -143,7 +155,7 @@ func (s) TestCredsBundleFromBalancer(t *testing.T) { te := newTest(t, env{name: "creds-bundle", network: "tcp", balancer: ""}) te.tapHandle = authHandle te.customDialOptions = []grpc.DialOption{ - grpc.WithBalancerName(testBalancerName), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, testBalancerName)), } creds, err := credentials.NewServerTLSFromFile(testdata.Path("x509/server1_cert.pem"), testdata.Path("x509/server1_key.pem")) if err != nil { @@ -156,7 +168,7 @@ func (s) TestCredsBundleFromBalancer(t *testing.T) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("Test failed. 
Reason: %v", err) } @@ -178,7 +190,7 @@ func testPickExtraMetadata(t *testing.T, e env) { ) te.customDialOptions = []grpc.DialOption{ - grpc.WithBalancerName(testBalancerName), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, testBalancerName)), grpc.WithUserAgent(testUserAgent), } te.startServer(&testServer{security: e.security}) @@ -193,16 +205,16 @@ func testPickExtraMetadata(t *testing.T, e env) { r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: te.srvAddr}}}) te.resolverScheme = "xds" cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) - // The RPCs will fail, but we don't care. We just need the pick to happen. - ctx1, cancel1 := context.WithTimeout(context.Background(), time.Second) - defer cancel1() - tc.EmptyCall(ctx1, &testpb.Empty{}) - - ctx2, cancel2 := context.WithTimeout(context.Background(), time.Second) - defer cancel2() - tc.EmptyCall(ctx2, &testpb.Empty{}, grpc.CallContentSubtype(testSubContentType)) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, nil) + } + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.CallContentSubtype(testSubContentType)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, nil) + } want := []metadata.MD{ // First RPC doesn't have sub-content-type. @@ -210,9 +222,8 @@ func testPickExtraMetadata(t *testing.T, e env) { // Second RPC has sub-content-type "proto". 
{"content-type": []string{"application/grpc+proto"}}, } - - if !cmp.Equal(b.pickExtraMDs, want) { - t.Fatalf("%s", cmp.Diff(b.pickExtraMDs, want)) + if diff := cmp.Diff(want, b.pickExtraMDs); diff != "" { + t.Fatalf("unexpected diff in metadata (-want, +got): %s", diff) } } @@ -227,14 +238,14 @@ func testDoneInfo(t *testing.T, e env) { b := &testBalancer{} balancer.Register(b) te.customDialOptions = []grpc.DialOption{ - grpc.WithBalancerName(testBalancerName), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, testBalancerName)), } te.userAgent = failAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -291,12 +302,10 @@ func init() { } func (s) TestDoneLoads(t *testing.T) { - for _, e := range listTestEnv() { - testDoneLoads(t, e) - } + testDoneLoads(t) } -func testDoneLoads(t *testing.T, e env) { +func testDoneLoads(t *testing.T) { b := &testBalancer{} balancer.Register(b) @@ -308,12 +317,12 @@ func testDoneLoads(t *testing.T, e env) { return &testpb.Empty{}, nil }, } - if err := ss.Start(nil, grpc.WithBalancerName(testBalancerName)); err != nil { + if err := ss.Start(nil, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, testBalancerName))); err != nil { t.Fatalf("error starting testing server: %v", err) } defer ss.Stop() - tc := testpb.NewTestServiceClient(ss.CC) + tc := testgrpc.NewTestServiceClient(ss.CC) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -374,8 +383,9 @@ func (testBalancerKeepAddresses) UpdateSubConnState(sc balancer.SubConn, s balan panic("not used") } -func (testBalancerKeepAddresses) Close() { -} +func (testBalancerKeepAddresses) Close() {} + +func (testBalancerKeepAddresses) ExitIdle() {} // Make sure that non-grpclb 
balancers don't get grpclb addresses even if name // resolver sends them @@ -385,8 +395,10 @@ func (s) TestNonGRPCLBBalancerGetsNoGRPCLBAddress(t *testing.T) { b := newTestBalancerKeepAddresses() balancer.Register(b) - cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithInsecure(), grpc.WithResolvers(r), - grpc.WithBalancerName(b.Name())) + cc, err := grpc.Dial(r.Scheme()+":///test.server", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, b.Name()))) if err != nil { t.Fatalf("failed to dial: %v", err) } @@ -500,7 +512,7 @@ func (s) TestAddressAttributesInNewSubConn(t *testing.T) { } s := grpc.NewServer() - testpb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) go s.Serve(lis) defer s.Stop() t.Logf("Started gRPC server at %s...", lis.Addr().String()) @@ -516,7 +528,7 @@ func (s) TestAddressAttributesInNewSubConn(t *testing.T) { t.Fatal(err) } defer cc.Close() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) t.Log("Created a ClientConn...") // The first RPC should fail because there's no address. 
@@ -633,7 +645,7 @@ func (s) TestServersSwap(t *testing.T) { return &testpb.SimpleResponse{Username: username}, nil }, } - testpb.RegisterTestServiceServer(s, ts) + testgrpc.RegisterTestServiceServer(s, ts) go s.Serve(lis) return lis.Addr().String(), s.Stop } @@ -647,12 +659,12 @@ func (s) TestServersSwap(t *testing.T) { // Initialize client r := manual.NewBuilderWithScheme("whatever") r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: addr1}}}) - cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(r)) + cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) if err != nil { t.Fatalf("Error creating client: %v", err) } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) // Confirm we are connected to the first server if res, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil || res.Username != one { @@ -673,10 +685,7 @@ func (s) TestServersSwap(t *testing.T) { } } -// TestEmptyAddrs verifies client behavior when a working connection is -// removed. In pick first and round-robin, both will continue using the old -// connections. 
-func (s) TestEmptyAddrs(t *testing.T) { +func (s) TestWaitForReady(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -693,135 +702,456 @@ func (s) TestEmptyAddrs(t *testing.T) { return &testpb.SimpleResponse{Username: one}, nil }, } - testpb.RegisterTestServiceServer(s, ts) + testgrpc.RegisterTestServiceServer(s, ts) go s.Serve(lis) - // Initialize pickfirst client - pfr := manual.NewBuilderWithScheme("whatever") - pfrnCalled := grpcsync.NewEvent() - pfr.ResolveNowCallback = func(resolver.ResolveNowOptions) { - pfrnCalled.Fire() - } - pfr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) + // Initialize client + r := manual.NewBuilderWithScheme("whatever") - pfcc, err := grpc.DialContext(ctx, pfr.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(pfr)) + cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) if err != nil { t.Fatalf("Error creating client: %v", err) } - defer pfcc.Close() - pfclient := testpb.NewTestServiceClient(pfcc) + defer cc.Close() + client := testgrpc.NewTestServiceClient(cc) - // Confirm we are connected to the server - if res, err := pfclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil || res.Username != one { - t.Fatalf("UnaryCall(_) = %v, %v; want {Username: %q}, nil", res, err, one) - } + // Report an error so non-WFR RPCs will give up early. + r.CC.ReportError(errors.New("fake resolver error")) - // Remove all addresses. - pfr.UpdateState(resolver.State{}) - // Wait for a ResolveNow call on the pick first client's resolver. - <-pfrnCalled.Done() + // Ensure the client is not connected to anything and fails non-WFR RPCs. 
+ if res, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.Unavailable { + t.Fatalf("UnaryCall(_) = %v, %v; want _, Code()=%v", res, err, codes.Unavailable) + } - // Initialize roundrobin client - rrr := manual.NewBuilderWithScheme("whatever") + errChan := make(chan error, 1) + go func() { + if res, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.WaitForReady(true)); err != nil || res.Username != one { + errChan <- fmt.Errorf("UnaryCall(_) = %v, %v; want {Username: %q}, nil", res, err, one) + } + close(errChan) + }() - rrrnCalled := grpcsync.NewEvent() - rrr.ResolveNowCallback = func(resolver.ResolveNowOptions) { - rrrnCalled.Fire() + select { + case err := <-errChan: + t.Errorf("unexpected receive from errChan before addresses provided") + t.Fatal(err.Error()) + case <-time.After(5 * time.Millisecond): } - rrr.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - rrcc, err := grpc.DialContext(ctx, rrr.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(rrr), - grpc.WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, roundrobin.Name))) - if err != nil { - t.Fatalf("Error creating client: %v", err) + // Resolve the server. The WFR RPC should unblock and use it. + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) + + if err := <-errChan; err != nil { + t.Fatal(err.Error()) } - defer rrcc.Close() - rrclient := testpb.NewTestServiceClient(rrcc) +} - // Confirm we are connected to the server - if res, err := rrclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil || res.Username != one { - t.Fatalf("UnaryCall(_) = %v, %v; want {Username: %q}, nil", res, err, one) +// authorityOverrideTransportCreds returns the configured authority value in its +// Info() method. 
+type authorityOverrideTransportCreds struct { + credentials.TransportCredentials + authorityOverride string +} + +func (ao *authorityOverrideTransportCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return rawConn, nil, nil +} +func (ao *authorityOverrideTransportCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{ServerName: ao.authorityOverride} +} +func (ao *authorityOverrideTransportCreds) Clone() credentials.TransportCredentials { + return &authorityOverrideTransportCreds{authorityOverride: ao.authorityOverride} +} + +// TestAuthorityInBuildOptions tests that the Authority field in +// balancer.BuildOptions is setup correctly from gRPC. +func (s) TestAuthorityInBuildOptions(t *testing.T) { + const dialTarget = "test.server" + + tests := []struct { + name string + dopts []grpc.DialOption + wantAuthority string + }{ + { + name: "authority from dial target", + dopts: []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}, + wantAuthority: dialTarget, + }, + { + name: "authority from dial option", + dopts: []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithAuthority("authority-override"), + }, + wantAuthority: "authority-override", + }, + { + name: "authority from transport creds", + dopts: []grpc.DialOption{grpc.WithTransportCredentials(&authorityOverrideTransportCreds{authorityOverride: "authority-override-from-transport-creds"})}, + wantAuthority: "authority-override-from-transport-creds", + }, } - // Remove all addresses. - rrr.UpdateState(resolver.State{}) - // Wait for a ResolveNow call on the round robin client's resolver. 
- <-rrrnCalled.Done() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + authorityCh := make(chan string, 1) + bf := stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + select { + case authorityCh <- bd.BuildOptions.Authority: + default: + } + + addrs := ccs.ResolverState.Addresses + if len(addrs) == 0 { + return nil + } + + // Only use the first address. + sc, err := bd.ClientConn.NewSubConn([]resolver.Address{addrs[0]}, balancer.NewSubConnOptions{}) + if err != nil { + return err + } + sc.Connect() + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + bd.ClientConn.UpdateState(balancer.State{ConnectivityState: state.ConnectivityState, Picker: &aiPicker{result: balancer.PickResult{SubConn: sc}, err: state.ConnectionError}}) + }, + } + balancerName := "stub-balancer-" + test.name + stub.Register(balancerName, bf) + t.Logf("Registered balancer %s...", balancerName) - // Confirm several new RPCs succeed on pick first. - for i := 0; i < 10; i++ { - if _, err := pfclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { - t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) - } - time.Sleep(5 * time.Millisecond) + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatal(err) + } + + s := grpc.NewServer() + testgrpc.RegisterTestServiceServer(s, &testServer{}) + go s.Serve(lis) + defer s.Stop() + t.Logf("Started gRPC server at %s...", lis.Addr().String()) + + r := manual.NewBuilderWithScheme("whatever") + t.Logf("Registered manual resolver with scheme %s...", r.Scheme()) + r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) + + dopts := append([]grpc.DialOption{ + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, balancerName)), + }, test.dopts...) + cc, err := grpc.Dial(r.Scheme()+":///"+dialTarget, dopts...) 
+ if err != nil { + t.Fatal(err) + } + defer cc.Close() + tc := testgrpc.NewTestServiceClient(cc) + t.Log("Created a ClientConn...") + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = _, %v, want _, ", err) + } + t.Log("Made an RPC which succeeded...") + + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for Authority in balancer.BuildOptions") + case gotAuthority := <-authorityCh: + if gotAuthority != test.wantAuthority { + t.Fatalf("Authority in balancer.BuildOptions is %s, want %s", gotAuthority, test.wantAuthority) + } + } + }) } +} - // Confirm several new RPCs succeed on round robin. - for i := 0; i < 10; i++ { - if _, err := pfclient.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { - t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) - } - time.Sleep(5 * time.Millisecond) +// wrappedPickFirstBalancerBuilder builds a custom balancer which wraps an +// underlying pick_first balancer. +type wrappedPickFirstBalancerBuilder struct { + name string +} + +func (*wrappedPickFirstBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(grpc.PickFirstBalancerName) + wpfb := &wrappedPickFirstBalancer{ + ClientConn: cc, } + pf := builder.Build(wpfb, opts) + wpfb.Balancer = pf + return wpfb } -func (s) TestWaitForReady(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() +func (wbb *wrappedPickFirstBalancerBuilder) Name() string { + return wbb.name +} - // Initialize server - lis, err := net.Listen("tcp", "localhost:0") +// wrappedPickFirstBalancer contains a pick_first balancer and forwards all +// calls from the ClientConn to it. For state updates from the pick_first +// balancer, it creates a custom picker which injects arbitrary metadata on a +// per-call basis. 
+type wrappedPickFirstBalancer struct { + balancer.Balancer + balancer.ClientConn +} + +func (wb *wrappedPickFirstBalancer) UpdateState(state balancer.State) { + state.Picker = &wrappedPicker{p: state.Picker} + wb.ClientConn.UpdateState(state) +} + +const ( + metadataHeaderInjectedByBalancer = "metadata-header-injected-by-balancer" + metadataHeaderInjectedByApplication = "metadata-header-injected-by-application" + metadataValueInjectedByBalancer = "metadata-value-injected-by-balancer" + metadataValueInjectedByApplication = "metadata-value-injected-by-application" +) + +// wrappedPicker wraps the picker returned by the pick_first balancer. +type wrappedPicker struct { + p balancer.Picker +} + +func (wp *wrappedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + res, err := wp.p.Pick(info) + if err != nil { + return balancer.PickResult{}, err + } + + if res.Metadata == nil { + res.Metadata = metadata.Pairs(metadataHeaderInjectedByBalancer, metadataValueInjectedByBalancer) + } else { + res.Metadata.Append(metadataHeaderInjectedByBalancer, metadataValueInjectedByBalancer) + } + return res, nil +} + +// TestMetadataInPickResult tests the scenario where an LB policy injects +// arbitrary metadata on a per-call basis and verifies that the injected +// metadata makes it all the way to the server RPC handler.
+func (s) TestMetadataInPickResult(t *testing.T) { + t.Log("Starting test backend...") + mdChan := make(chan metadata.MD, 1) + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + select { + case mdChan <- md: + case <-ctx.Done(): + return nil, ctx.Err() + } + return &testpb.Empty{}, nil }, } - testpb.RegisterTestServiceServer(s, ts) - go s.Serve(lis) + if err := ss.StartServer(); err != nil { + t.Fatalf("Starting test backend: %v", err) + } + defer ss.Stop() + t.Logf("Started test backend at %q", ss.Address) - // Initialize client - r := manual.NewBuilderWithScheme("whatever") + name := t.Name() + "wrappedPickFirstBalancer" + t.Logf("Registering test balancer with name %q...", name) + b := &wrappedPickFirstBalancerBuilder{name: t.Name() + "wrappedPickFirstBalancer"} + balancer.Register(b) - cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithInsecure(), grpc.WithResolvers(r)) + t.Log("Creating ClientConn to test backend...") + r := manual.NewBuilderWithScheme("whatever") + r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: ss.Address}}}) + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, b.Name())), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) if err != nil { - t.Fatalf("Error creating client: %v", err) + t.Fatalf("grpc.Dial(): %v", err) } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) - // Report an error so non-WFR RPCs will give up early. 
- r.CC.ReportError(errors.New("fake resolver error")) + t.Log("Making EmptyCall() RPC with custom metadata...") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + md := metadata.Pairs(metadataHeaderInjectedByApplication, metadataValueInjectedByApplication) + ctx = metadata.NewOutgoingContext(ctx, md) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() RPC: %v", err) + } + t.Log("EmptyCall() RPC succeeded") - // Ensure the client is not connected to anything and fails non-WFR RPCs. - if res, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.Unavailable { - t.Fatalf("UnaryCall(_) = %v, %v; want _, Code()=%v", res, err, codes.Unavailable) + t.Log("Waiting for custom metadata to be received at the test backend...") + var gotMD metadata.MD + select { + case gotMD = <-mdChan: + case <-ctx.Done(): + t.Fatalf("Timed out waiting for custom metadata to be received at the test backend") } - errChan := make(chan error, 1) + t.Log("Verifying custom metadata added by the client application is received at the test backend...") + wantMDVal := []string{metadataValueInjectedByApplication} + gotMDVal := gotMD.Get(metadataHeaderInjectedByApplication) + if !cmp.Equal(gotMDVal, wantMDVal) { + t.Fatalf("Mismatch in custom metadata received at test backend, got: %v, want %v", gotMDVal, wantMDVal) + } + + t.Log("Verifying custom metadata added by the LB policy is received at the test backend...") + wantMDVal = []string{metadataValueInjectedByBalancer} + gotMDVal = gotMD.Get(metadataHeaderInjectedByBalancer) + if !cmp.Equal(gotMDVal, wantMDVal) { + t.Fatalf("Mismatch in custom metadata received at test backend, got: %v, want %v", gotMDVal, wantMDVal) + } +} + +// producerTestBalancerBuilder and producerTestBalancer start a producer which +// makes an RPC before the subconn is READY, then connects the subconn, and +// pushes the resulting error (expected to be nil) to rpcErrChan. 
+type producerTestBalancerBuilder struct { + rpcErrChan chan error + ctxChan chan context.Context + connect bool +} + +func (bb *producerTestBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return &producerTestBalancer{cc: cc, rpcErrChan: bb.rpcErrChan, ctxChan: bb.ctxChan, connect: bb.connect} +} + +const producerTestBalancerName = "producer_test_balancer" + +func (bb *producerTestBalancerBuilder) Name() string { return producerTestBalancerName } + +type producerTestBalancer struct { + cc balancer.ClientConn + rpcErrChan chan error + ctxChan chan context.Context + connect bool +} + +func (b *producerTestBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + // Create the subconn, but don't connect it. + sc, err := b.cc.NewSubConn(ccs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + return fmt.Errorf("error creating subconn: %v", err) + } + + // Create the producer. This will call the producer builder's Build + // method, which will try to start an RPC in a goroutine. + p := &testProducerBuilder{start: grpcsync.NewEvent(), rpcErrChan: b.rpcErrChan, ctxChan: b.ctxChan} + sc.GetOrBuildProducer(p) + + // Wait here until the producer is about to perform the RPC, which should + // block until connected. + <-p.start.Done() + + // Ensure the error chan doesn't get anything on it before we connect the + // subconn. + select { + case err := <-b.rpcErrChan: + go func() { b.rpcErrChan <- fmt.Errorf("Got unexpected data on rpcErrChan: %v", err) }() + default: + } + + if b.connect { + // Now we can connect, which will unblock the RPC above. + sc.Connect() + } + + // The stub server requires a READY picker to be reported, to unblock its + // Start method. We won't make RPCs in our test, so a nil picker is okay. 
+ b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: nil}) + return nil +} + +func (b *producerTestBalancer) ResolverError(err error) { + panic(fmt.Sprintf("Unexpected resolver error: %v", err)) +} + +func (b *producerTestBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {} +func (b *producerTestBalancer) Close() {} + +type testProducerBuilder struct { + start *grpcsync.Event + rpcErrChan chan error + ctxChan chan context.Context +} + +func (b *testProducerBuilder) Build(cci interface{}) (balancer.Producer, func()) { + c := testgrpc.NewTestServiceClient(cci.(grpc.ClientConnInterface)) + // Perform the RPC in a goroutine instead of during build because the + // subchannel's mutex is held here. go func() { - if res, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}, grpc.WaitForReady(true)); err != nil || res.Username != one { - errChan <- fmt.Errorf("UnaryCall(_) = %v, %v; want {Username: %q}, nil", res, err, one) - } - close(errChan) + ctx := <-b.ctxChan + b.start.Fire() + _, err := c.EmptyCall(ctx, &testpb.Empty{}) + b.rpcErrChan <- err }() + return nil, func() {} +} - select { - case err := <-errChan: - t.Errorf("unexpected receive from errChan before addresses provided") - t.Fatal(err.Error()) - case <-time.After(5 * time.Millisecond): +// TestBalancerProducerBlockUntilReady tests that we get no RPC errors from +// producers when subchannels aren't ready. +func (s) TestBalancerProducerBlockUntilReady(t *testing.T) { + // rpcErrChan is given to the LB policy to report the status of the + // producer's one RPC. 
+ ctxChan := make(chan context.Context, 1) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + ctxChan <- ctx + + rpcErrChan := make(chan error) + balancer.Register(&producerTestBalancerBuilder{rpcErrChan: rpcErrChan, ctxChan: ctxChan, connect: true}) + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, } - // Resolve the server. The WFR RPC should unblock and use it. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) + // Start the server & client with the test producer LB policy. + svcCfg := fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, producerTestBalancerName) + if err := ss.Start(nil, grpc.WithDefaultServiceConfig(svcCfg)); err != nil { + t.Fatalf("Error starting testing server: %v", err) + } + defer ss.Stop() - if err := <-errChan; err != nil { - t.Fatal(err.Error()) + // Receive the error from the producer's RPC, which should be nil. + if err := <-rpcErrChan; err != nil { + t.Fatalf("Received unexpected error from producer RPC: %v", err) + } +} + +// TestBalancerProducerHonorsContext tests that producers that perform RPC get +// context errors correctly. +func (s) TestBalancerProducerHonorsContext(t *testing.T) { + // rpcErrChan is given to the LB policy to report the status of the + // producer's one RPC. + ctxChan := make(chan context.Context, 1) + ctx, cancel := context.WithCancel(context.Background()) + ctxChan <- ctx + + rpcErrChan := make(chan error) + balancer.Register(&producerTestBalancerBuilder{rpcErrChan: rpcErrChan, ctxChan: ctxChan, connect: false}) + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + // Start the server & client with the test producer LB policy. 
+ svcCfg := fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, producerTestBalancerName) + if err := ss.Start(nil, grpc.WithDefaultServiceConfig(svcCfg)); err != nil { + t.Fatalf("Error starting testing server: %v", err) + } + defer ss.Stop() + + cancel() + + // Receive the error from the producer's RPC, which should be canceled. + if err := <-rpcErrChan; status.Code(err) != codes.Canceled { + t.Fatalf("RPC error: %v; want status.Code(err)=%v", err, codes.Canceled) } } diff --git a/test/bufconn/bufconn.go b/test/bufconn/bufconn.go index 168cdb8578dd..3f77f4876eb8 100644 --- a/test/bufconn/bufconn.go +++ b/test/bufconn/bufconn.go @@ -21,6 +21,7 @@ package bufconn import ( + "context" "fmt" "io" "net" @@ -86,8 +87,17 @@ func (l *Listener) Addr() net.Addr { return addr{} } // providing it the server half of the connection, and returns the client half // of the connection. func (l *Listener) Dial() (net.Conn, error) { + return l.DialContext(context.Background()) +} + +// DialContext creates an in-memory full-duplex network connection, unblocks Accept by +// providing it the server half of the connection, and returns the client half +// of the connection. If ctx is Done, returns ctx.Err() +func (l *Listener) DialContext(ctx context.Context) (net.Conn, error) { p1, p2 := newPipe(l.sz), newPipe(l.sz) select { + case <-ctx.Done(): + return nil, ctx.Err() case <-l.done: return nil, errClosed case l.ch <- &conn{p1, p2}: diff --git a/test/channelz_linux_go110_test.go b/test/channelz_linux_test.go similarity index 95% rename from test/channelz_linux_go110_test.go rename to test/channelz_linux_test.go index dea374bfc08b..e532fbb12c56 100644 --- a/test/channelz_linux_go110_test.go +++ b/test/channelz_linux_test.go @@ -1,5 +1,3 @@ -// +build linux - /* * * Copyright 2018 gRPC authors. 
@@ -29,7 +27,8 @@ import ( "time" "google.golang.org/grpc/internal/channelz" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestCZSocketMetricsSocketOption(t *testing.T) { @@ -40,13 +39,13 @@ func (s) TestCZSocketMetricsSocketOption(t *testing.T) { } func testCZSocketMetricsSocketOption(t *testing.T, e env) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) doSuccessfulUnaryCall(tc, t) time.Sleep(10 * time.Millisecond) diff --git a/test/channelz_test.go b/test/channelz_test.go index 47e7eb927169..d43c155a15df 100644 --- a/test/channelz_test.go +++ b/test/channelz_test.go @@ -23,12 +23,13 @@ import ( "crypto/tls" "fmt" "net" - "reflect" + "regexp" "strings" "sync" "testing" "time" + "github.com/google/go-cmp/cmp" "golang.org/x/net/http2" "google.golang.org/grpc" _ "google.golang.org/grpc/balancer/grpclb" @@ -43,8 +44,10 @@ import ( "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func czCleanupWrapper(cleanup func() error, t *testing.T) { @@ -82,7 +85,7 @@ func (s) TestCZServerRegistrationAndDeletion(t *testing.T) { } for _, c := range testcases { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -101,7 +104,7 @@ func (s) TestCZServerRegistrationAndDeletion(t *testing.T) { } func (s) TestCZGetServer(t *testing.T) { - 
czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -153,7 +156,7 @@ func (s) TestCZTopChannelRegistrationAndDeletion(t *testing.T) { } for _, c := range testcases { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -191,7 +194,7 @@ func (s) TestCZTopChannelRegistrationAndDeletion(t *testing.T) { } func (s) TestCZTopChannelRegistrationAndDeletionWhenDialFail(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) // Make dial fails (due to no transport security specified) _, err := grpc.Dial("fake.addr") @@ -204,7 +207,7 @@ func (s) TestCZTopChannelRegistrationAndDeletionWhenDialFail(t *testing.T) { } func (s) TestCZNestedChannelRegistrationAndDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv // avoid calling API to set balancer type, which will void service config's change of balancer. 
@@ -230,7 +233,10 @@ func (s) TestCZNestedChannelRegistrationAndDeletion(t *testing.T) { t.Fatal(err) } - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, + ServiceConfig: parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`), + }) // wait for the shutdown of grpclb balancer if err := verifyResultWithDelay(func() (bool, error) { @@ -248,7 +254,7 @@ func (s) TestCZNestedChannelRegistrationAndDeletion(t *testing.T) { } func (s) TestCZClientSubChannelSocketRegistrationAndDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv num := 3 // number of backends @@ -336,7 +342,7 @@ func (s) TestCZServerSocketRegistrationAndDeletion(t *testing.T) { } for _, c := range testcases { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -396,7 +402,7 @@ func (s) TestCZServerSocketRegistrationAndDeletion(t *testing.T) { } func (s) TestCZServerListenSocketDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) s := grpc.NewServer() lis, err := net.Listen("tcp", "localhost:0") @@ -453,13 +459,13 @@ func (s) TestCZRecusivelyDeletionOfEntry(t *testing.T) { // | | // v v // Socket1 Socket2 - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) - topChanID := channelz.RegisterChannel(&dummyChannel{}, 0, "") - subChanID1 := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, "") - subChanID2 := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, 
"") - sktID1 := channelz.RegisterNormalSocket(&dummySocket{}, subChanID1, "") - sktID2 := channelz.RegisterNormalSocket(&dummySocket{}, subChanID1, "") + topChanID := channelz.RegisterChannel(&dummyChannel{}, nil, "") + subChanID1, _ := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, "") + subChanID2, _ := channelz.RegisterSubChannel(&dummyChannel{}, topChanID, "") + sktID1, _ := channelz.RegisterNormalSocket(&dummySocket{}, subChanID1, "") + sktID2, _ := channelz.RegisterNormalSocket(&dummySocket{}, subChanID1, "") tcs, _ := channelz.GetTopChannels(0, 0) if tcs == nil || len(tcs) != 1 { @@ -468,7 +474,7 @@ func (s) TestCZRecusivelyDeletionOfEntry(t *testing.T) { if len(tcs[0].SubChans) != 2 { t.Fatalf("There should be two SubChannel entries") } - sc := channelz.GetSubChannel(subChanID1) + sc := channelz.GetSubChannel(subChanID1.Int()) if sc == nil || len(sc.Sockets) != 2 { t.Fatalf("There should be two Socket entries") } @@ -498,7 +504,7 @@ func (s) TestCZRecusivelyDeletionOfEntry(t *testing.T) { } func (s) TestCZChannelMetrics(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv num := 3 // number of backends @@ -514,7 +520,7 @@ func (s) TestCZChannelMetrics(t *testing.T) { te.resolverScheme = r.Scheme() cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } @@ -586,7 +592,7 @@ func (s) TestCZChannelMetrics(t *testing.T) { } func (s) TestCZServerMetrics(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -594,7 +600,7 @@ func (s) TestCZServerMetrics(t *testing.T) { 
te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) if _, err := tc.EmptyCall(context.Background(), &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } @@ -642,7 +648,7 @@ func (s) TestCZServerMetrics(t *testing.T) { } type testServiceClientWrapper struct { - testpb.TestServiceClient + testgrpc.TestServiceClient mu sync.RWMutex streamsCreated int } @@ -667,35 +673,35 @@ func (t *testServiceClientWrapper) UnaryCall(ctx context.Context, in *testpb.Sim return t.TestServiceClient.UnaryCall(ctx, in, opts...) } -func (t *testServiceClientWrapper) StreamingOutputCall(ctx context.Context, in *testpb.StreamingOutputCallRequest, opts ...grpc.CallOption) (testpb.TestService_StreamingOutputCallClient, error) { +func (t *testServiceClientWrapper) StreamingOutputCall(ctx context.Context, in *testpb.StreamingOutputCallRequest, opts ...grpc.CallOption) (testgrpc.TestService_StreamingOutputCallClient, error) { t.mu.Lock() defer t.mu.Unlock() t.streamsCreated++ return t.TestServiceClient.StreamingOutputCall(ctx, in, opts...) } -func (t *testServiceClientWrapper) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (testpb.TestService_StreamingInputCallClient, error) { +func (t *testServiceClientWrapper) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (testgrpc.TestService_StreamingInputCallClient, error) { t.mu.Lock() defer t.mu.Unlock() t.streamsCreated++ return t.TestServiceClient.StreamingInputCall(ctx, opts...) 
} -func (t *testServiceClientWrapper) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (testpb.TestService_FullDuplexCallClient, error) { +func (t *testServiceClientWrapper) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (testgrpc.TestService_FullDuplexCallClient, error) { t.mu.Lock() defer t.mu.Unlock() t.streamsCreated++ return t.TestServiceClient.FullDuplexCall(ctx, opts...) } -func (t *testServiceClientWrapper) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (testpb.TestService_HalfDuplexCallClient, error) { +func (t *testServiceClientWrapper) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (testgrpc.TestService_HalfDuplexCallClient, error) { t.mu.Lock() defer t.mu.Unlock() t.streamsCreated++ return t.TestServiceClient.HalfDuplexCall(ctx, opts...) } -func doSuccessfulUnaryCall(tc testpb.TestServiceClient, t *testing.T) { +func doSuccessfulUnaryCall(tc testgrpc.TestServiceClient, t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -703,7 +709,7 @@ func doSuccessfulUnaryCall(tc testpb.TestServiceClient, t *testing.T) { } } -func doStreamingInputCallWithLargePayload(tc testpb.TestServiceClient, t *testing.T) { +func doStreamingInputCallWithLargePayload(tc testgrpc.TestServiceClient, t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() s, err := tc.StreamingInputCall(ctx) @@ -717,7 +723,7 @@ func doStreamingInputCallWithLargePayload(tc testpb.TestServiceClient, t *testin s.Send(&testpb.StreamingInputCallRequest{Payload: payload}) } -func doServerSideFailedUnaryCall(tc testpb.TestServiceClient, t *testing.T) { +func doServerSideFailedUnaryCall(tc testgrpc.TestServiceClient, t *testing.T) { const smallSize = 1 const largeSize = 2000 @@ -737,7 +743,7 @@ func doServerSideFailedUnaryCall(tc testpb.TestServiceClient, t *testing.T) { } } -func 
doClientSideInitiatedFailedStream(tc testpb.TestServiceClient, t *testing.T) { +func doClientSideInitiatedFailedStream(tc testgrpc.TestServiceClient, t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) stream, err := tc.FullDuplexCall(ctx) if err != nil { @@ -770,7 +776,7 @@ func doClientSideInitiatedFailedStream(tc testpb.TestServiceClient, t *testing.T } // This func is to be used to test client side counting of failed streams. -func doServerSideInitiatedFailedStreamWithRSTStream(tc testpb.TestServiceClient, t *testing.T, l *listenerWrapper) { +func doServerSideInitiatedFailedStreamWithRSTStream(tc testgrpc.TestServiceClient, t *testing.T, l *listenerWrapper) { stream, err := tc.FullDuplexCall(context.Background()) if err != nil { t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want ", err) @@ -808,7 +814,7 @@ func doServerSideInitiatedFailedStreamWithRSTStream(tc testpb.TestServiceClient, } // this func is to be used to test client side counting of failed streams. -func doServerSideInitiatedFailedStreamWithGoAway(tc testpb.TestServiceClient, t *testing.T, l *listenerWrapper) { +func doServerSideInitiatedFailedStreamWithGoAway(tc testgrpc.TestServiceClient, t *testing.T, l *listenerWrapper) { // This call is just to keep the transport from shutting down (socket will be deleted // in this case, and we will not be able to get metrics). 
s, err := tc.FullDuplexCall(context.Background()) @@ -850,7 +856,7 @@ func doServerSideInitiatedFailedStreamWithGoAway(tc testpb.TestServiceClient, t } } -func doIdleCallToInvokeKeepAlive(tc testpb.TestServiceClient, t *testing.T) { +func doIdleCallToInvokeKeepAlive(tc testgrpc.TestServiceClient, t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) _, err := tc.FullDuplexCall(ctx) if err != nil { @@ -862,7 +868,7 @@ func doIdleCallToInvokeKeepAlive(tc testpb.TestServiceClient, t *testing.T) { } func (s) TestCZClientSocketMetricsStreamsAndMessagesCount(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -871,7 +877,7 @@ func (s) TestCZClientSocketMetricsStreamsAndMessagesCount(t *testing.T) { rcw := te.startServerWithConnControl(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} + tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)} doSuccessfulUnaryCall(tc, t) var scID, skID int64 @@ -962,7 +968,7 @@ func (s) TestCZClientSocketMetricsStreamsAndMessagesCount(t *testing.T) { // It is separated from other cases due to setup incompatibly, i.e. max receive // size violation will mask flow control violation. func (s) TestCZClientAndServerSocketMetricsStreamsCountFlowControlRSTStream(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -970,7 +976,7 @@ func (s) TestCZClientAndServerSocketMetricsStreamsCountFlowControlRSTStream(t *t // Avoid overflowing connection level flow control window, which will lead to // transport being closed. 
te.serverInitialConnWindowSize = 65536 * 2 - ts := &stubserver.StubServer{FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + ts := &stubserver.StubServer{FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { stream.Send(&testpb.StreamingOutputCallResponse{}) <-stream.Context().Done() return status.Errorf(codes.DeadlineExceeded, "deadline exceeded or cancelled") @@ -978,7 +984,7 @@ func (s) TestCZClientAndServerSocketMetricsStreamsCountFlowControlRSTStream(t *t te.startServer(ts) defer te.tearDown() cc, dw := te.clientConnWithConnControl() - tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} + tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)} ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) stream, err := tc.FullDuplexCall(ctx) @@ -1046,7 +1052,7 @@ func (s) TestCZClientAndServerSocketMetricsStreamsCountFlowControlRSTStream(t *t } func (s) TestCZClientAndServerSocketMetricsFlowControl(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1058,7 +1064,7 @@ func (s) TestCZClientAndServerSocketMetricsFlowControl(t *testing.T) { te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) for i := 0; i < 10; i++ { doSuccessfulUnaryCall(tc, t) @@ -1159,7 +1165,7 @@ func (s) TestCZClientAndServerSocketMetricsFlowControl(t *testing.T) { } func (s) TestCZClientSocketMetricsKeepAlive(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) defer func(t time.Duration) { internal.KeepaliveMinPingTime = t }(internal.KeepaliveMinPingTime) internal.KeepaliveMinPingTime = time.Second @@ 
-1212,7 +1218,7 @@ func (s) TestCZClientSocketMetricsKeepAlive(t *testing.T) { } func (s) TestCZServerSocketMetricsStreamsAndMessagesCount(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1221,7 +1227,7 @@ func (s) TestCZServerSocketMetricsStreamsAndMessagesCount(t *testing.T) { te.startServer(&testServer{security: e.security}) defer te.tearDown() cc, _ := te.clientConnWithConnControl() - tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} + tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)} var svrID int64 if err := verifyResultWithDelay(func() (bool, error) { @@ -1273,7 +1279,7 @@ func (s) TestCZServerSocketMetricsStreamsAndMessagesCount(t *testing.T) { } func (s) TestCZServerSocketMetricsKeepAlive(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1292,7 +1298,7 @@ func (s) TestCZServerSocketMetricsKeepAlive(t *testing.T) { te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) start := time.Now() doIdleCallToInvokeKeepAlive(tc, t) @@ -1345,7 +1351,7 @@ var cipherSuites = []string{ } func (s) TestCZSocketGetSecurityValueTLS(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpTLSRREnv te := newTest(t, e) @@ -1380,7 +1386,7 @@ func (s) TestCZSocketGetSecurityValueTLS(t *testing.T) { if !ok { return false, fmt.Errorf("the SocketData.Security is of type: %T, want: *credentials.TLSChannelzSecurityValue", skt.SocketData.Security) } - if !reflect.DeepEqual(securityVal.RemoteCertificate, 
cert.Certificate[0]) { + if !cmp.Equal(securityVal.RemoteCertificate, cert.Certificate[0]) { return false, fmt.Errorf("SocketData.Security.RemoteCertificate got: %v, want: %v", securityVal.RemoteCertificate, cert.Certificate[0]) } for _, v := range cipherSuites { @@ -1395,8 +1401,9 @@ func (s) TestCZSocketGetSecurityValueTLS(t *testing.T) { } func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) + e := tcpClearRREnv // avoid calling API to set balancer type, which will void service config's change of balancer. e.balancer = "" @@ -1407,6 +1414,7 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { te.resolverScheme = r.Scheme() te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() + var nestedConn int64 if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) @@ -1431,15 +1439,19 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { if len(ncm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for nested channel not 0") } - if ncm.Trace.Events[0].Desc != "Channel Created" { - return false, fmt.Errorf("the first trace event should be \"Channel Created\", not %q", ncm.Trace.Events[0].Desc) + pattern := `Channel created` + if ok, _ := regexp.MatchString(pattern, ncm.Trace.Events[0].Desc); !ok { + return false, fmt.Errorf("the first trace event should be %q, not %q", pattern, ncm.Trace.Events[0].Desc) } return true, nil }); err != nil { t.Fatal(err) } - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, + ServiceConfig: parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`), + }) // wait for the shutdown of grpclb balancer if err := 
verifyResultWithDelay(func() (bool, error) { @@ -1460,8 +1472,9 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { if len(ncm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for nested channel not 0") } - if ncm.Trace.Events[len(ncm.Trace.Events)-1].Desc != "Channel Deleted" { - return false, fmt.Errorf("the first trace event should be \"Channel Deleted\", not %q", ncm.Trace.Events[0].Desc) + pattern := `Channel created` + if ok, _ := regexp.MatchString(pattern, ncm.Trace.Events[0].Desc); !ok { + return false, fmt.Errorf("the first trace event should be %q, not %q", pattern, ncm.Trace.Events[0].Desc) } return true, nil }); err != nil { @@ -1470,7 +1483,7 @@ func (s) TestCZChannelTraceCreationDeletion(t *testing.T) { } func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1509,29 +1522,20 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { if len(scm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } - if scm.Trace.Events[0].Desc != "Subchannel Created" { - return false, fmt.Errorf("the first trace event should be \"Subchannel Created\", not %q", scm.Trace.Events[0].Desc) + pattern := `Subchannel created` + if ok, _ := regexp.MatchString(pattern, scm.Trace.Events[0].Desc); !ok { + return false, fmt.Errorf("the first trace event should be %q, not %q", pattern, scm.Trace.Events[0].Desc) } return true, nil }); err != nil { t.Fatal(err) } - // Wait for ready ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - for src := te.cc.GetState(); src != connectivity.Ready; src = te.cc.GetState() { - if !te.cc.WaitForStateChange(ctx, src) { - t.Fatalf("timed out waiting for state change. 
got %v; want %v", src, connectivity.Ready) - } - } + awaitState(ctx, t, te.cc, connectivity.Ready) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}}) - // Wait for not-ready. - for src := te.cc.GetState(); src == connectivity.Ready; src = te.cc.GetState() { - if !te.cc.WaitForStateChange(ctx, src) { - t.Fatalf("timed out waiting for state change. got %v; want !%v", src, connectivity.Ready) - } - } + awaitNotState(ctx, t, te.cc, connectivity.Ready) if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) @@ -1551,10 +1555,12 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { if len(scm.Trace.Events) == 0 { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } - if got, want := scm.Trace.Events[len(scm.Trace.Events)-1].Desc, "Subchannel Deleted"; got != want { - return false, fmt.Errorf("the last trace event should be %q, not %q", want, got) - } + pattern := `Subchannel deleted` + desc := scm.Trace.Events[len(scm.Trace.Events)-1].Desc + if ok, _ := regexp.MatchString(pattern, desc); !ok { + return false, fmt.Errorf("the last trace event should be %q, not %q", pattern, desc) + } return true, nil }); err != nil { t.Fatal(err) @@ -1562,7 +1568,7 @@ func (s) TestCZSubChannelTraceCreationDeletion(t *testing.T) { } func (s) TestCZChannelAddressResolutionChange(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv e.balancer = "" @@ -1595,12 +1601,15 @@ func (s) TestCZChannelAddressResolutionChange(t *testing.T) { }); err != nil { t.Fatal(err) } - r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) + r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`), + }) if err := 
verifyResultWithDelay(func() (bool, error) { cm := channelz.GetChannel(cid) for i := len(cm.Trace.Events) - 1; i >= 0; i-- { - if cm.Trace.Events[i].Desc == fmt.Sprintf("Channel switches to new LB policy %q", roundrobin.Name) { + if strings.Contains(cm.Trace.Events[i].Desc, fmt.Sprintf("Channel switches to new LB policy %q", roundrobin.Name)) { break } if i == 0 { @@ -1612,7 +1621,7 @@ func (s) TestCZChannelAddressResolutionChange(t *testing.T) { t.Fatal(err) } - newSC := parseCfg(r, `{ + newSC := parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1665,7 +1674,7 @@ func (s) TestCZChannelAddressResolutionChange(t *testing.T) { } func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv e.balancer = "" @@ -1680,7 +1689,7 @@ func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { te.resolverScheme = r.Scheme() cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // make sure the connection is up ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -1689,8 +1698,22 @@ func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { } te.srvs[0].Stop() te.srvs[1].Stop() - // Here, we just wait for all sockets to be up. In the future, if we implement - // IDLE, we may need to make several rpc calls to create the sockets. + // Here, we just wait for all sockets to be up. Make several rpc calls to + // create the sockets since we do not automatically reconnect. 
+ done := make(chan struct{}) + defer close(done) + go func() { + for { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + tc.EmptyCall(ctx, &testpb.Empty{}) + cancel() + select { + case <-time.After(10 * time.Millisecond): + case <-done: + return + } + } + }() if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { @@ -1711,7 +1734,7 @@ func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } for i := len(scm.Trace.Events) - 1; i >= 0; i-- { - if scm.Trace.Events[i].Desc == fmt.Sprintf("Subchannel picks a new address %q to connect", te.srvAddrs[2]) { + if strings.Contains(scm.Trace.Events[i].Desc, fmt.Sprintf("Subchannel picks a new address %q to connect", te.srvAddrs[2])) { break } if i == 0 { @@ -1725,7 +1748,7 @@ func (s) TestCZSubChannelPickedNewAddress(t *testing.T) { } func (s) TestCZSubChannelConnectivityState(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1735,16 +1758,16 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { te.resolverScheme = r.Scheme() cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // make sure the connection is up ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } - var subConn int64 te.srv.Stop() + var subConn int64 if err := verifyResultWithDelay(func() (bool, error) { // we need to obtain the SubChannel id before it gets deleted from Channel's children list (due // to effect of r.UpdateState(resolver.State{Addresses:[]resolver.Address{}})) @@ -1759,6 
+1782,7 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { for k := range tcs[0].SubChans { // get the SubChannel id for further trace inquiry. subConn = k + t.Logf("SubChannel Id is %d", subConn) } } scm := channelz.GetSubChannel(subConn) @@ -1772,8 +1796,10 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { return false, fmt.Errorf("there should be at least one trace event for subChannel not 0") } var ready, connecting, transient, shutdown int + t.Log("SubChannel trace events seen so far...") for _, e := range scm.Trace.Events { - if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.TransientFailure) { + t.Log(e.Desc) + if strings.Contains(e.Desc, fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.TransientFailure)) { transient++ } } @@ -1784,17 +1810,19 @@ func (s) TestCZSubChannelConnectivityState(t *testing.T) { } transient = 0 r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}}) + t.Log("SubChannel trace events seen so far...") for _, e := range scm.Trace.Events { - if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Ready) { + t.Log(e.Desc) + if strings.Contains(e.Desc, fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Ready)) { ready++ } - if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Connecting) { + if strings.Contains(e.Desc, fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Connecting)) { connecting++ } - if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.TransientFailure) { + if strings.Contains(e.Desc, fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.TransientFailure)) { transient++ } - if e.Desc == fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Shutdown) { + if strings.Contains(e.Desc, fmt.Sprintf("Subchannel Connectivity change to %v", connectivity.Shutdown)) { shutdown++ } } @@ -1819,7 +1847,7 @@ func (s) 
TestCZSubChannelConnectivityState(t *testing.T) { } func (s) TestCZChannelConnectivityState(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1829,7 +1857,7 @@ func (s) TestCZChannelConnectivityState(t *testing.T) { te.resolverScheme = r.Scheme() cc := te.clientConn(grpc.WithResolvers(r)) defer te.tearDown() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // make sure the connection is up ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -1837,6 +1865,7 @@ func (s) TestCZChannelConnectivityState(t *testing.T) { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } te.srv.Stop() + if err := verifyResultWithDelay(func() (bool, error) { tcs, _ := channelz.GetTopChannels(0, 0) if len(tcs) != 1 { @@ -1844,14 +1873,16 @@ func (s) TestCZChannelConnectivityState(t *testing.T) { } var ready, connecting, transient int + t.Log("Channel trace events seen so far...") for _, e := range tcs[0].Trace.Events { - if e.Desc == fmt.Sprintf("Channel Connectivity change to %v", connectivity.Ready) { + t.Log(e.Desc) + if strings.Contains(e.Desc, fmt.Sprintf("Channel Connectivity change to %v", connectivity.Ready)) { ready++ } - if e.Desc == fmt.Sprintf("Channel Connectivity change to %v", connectivity.Connecting) { + if strings.Contains(e.Desc, fmt.Sprintf("Channel Connectivity change to %v", connectivity.Connecting)) { connecting++ } - if e.Desc == fmt.Sprintf("Channel Connectivity change to %v", connectivity.TransientFailure) { + if strings.Contains(e.Desc, fmt.Sprintf("Channel Connectivity change to %v", connectivity.TransientFailure)) { transient++ } } @@ -1875,11 +1906,9 @@ func (s) TestCZChannelConnectivityState(t *testing.T) { } func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := 
channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv - // avoid newTest using WithBalancerName, which would override service - // config's change of balancer below. e.balancer = "" te := newTest(t, e) channelz.SetMaxTraceEntry(1) @@ -1907,7 +1936,10 @@ func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) { t.Fatal(err) } - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, + ServiceConfig: parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`), + }) // wait for the shutdown of grpclb balancer if err := verifyResultWithDelay(func() (bool, error) { @@ -1925,7 +1957,10 @@ func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) { // If nested channel deletion is last trace event before the next validation, it will fail, as the top channel will hold a reference to it. // This line forces a trace event on the top channel in that case. - r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, ServiceConfig: parseCfg(r, `{"loadBalancingPolicy": "round_robin"}`)}) + r.UpdateState(resolver.State{ + Addresses: []resolver.Address{{Addr: "127.0.0.1:0"}}, + ServiceConfig: parseServiceConfig(t, r, `{"loadBalancingPolicy": "round_robin"}`), + }) // verify that the nested channel no longer exist due to trace referencing it got overwritten. 
if err := verifyResultWithDelay(func() (bool, error) { @@ -1940,7 +1975,7 @@ func (s) TestCZTraceOverwriteChannelDeletion(t *testing.T) { } func (s) TestCZTraceOverwriteSubChannelDeletion(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) @@ -1971,21 +2006,11 @@ func (s) TestCZTraceOverwriteSubChannelDeletion(t *testing.T) { t.Fatal(err) } - // Wait for ready ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - for src := te.cc.GetState(); src != connectivity.Ready; src = te.cc.GetState() { - if !te.cc.WaitForStateChange(ctx, src) { - t.Fatalf("timed out waiting for state change. got %v; want %v", src, connectivity.Ready) - } - } + awaitState(ctx, t, te.cc, connectivity.Ready) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "fake address"}}}) - // Wait for not-ready. - for src := te.cc.GetState(); src == connectivity.Ready; src = te.cc.GetState() { - if !te.cc.WaitForStateChange(ctx, src) { - t.Fatalf("timed out waiting for state change. got %v; want !%v", src, connectivity.Ready) - } - } + awaitNotState(ctx, t, te.cc, connectivity.Ready) // verify that the subchannel no longer exist due to trace referencing it got overwritten. 
if err := verifyResultWithDelay(func() (bool, error) { @@ -2000,7 +2025,7 @@ func (s) TestCZTraceOverwriteSubChannelDeletion(t *testing.T) { } func (s) TestCZTraceTopChannelDeletionTraceClear(t *testing.T) { - czCleanup := channelz.NewChannelzStorage() + czCleanup := channelz.NewChannelzStorageForTesting() defer czCleanupWrapper(czCleanup, t) e := tcpClearRREnv te := newTest(t, e) diff --git a/clientconn_state_transition_test.go b/test/clientconn_state_transition_test.go similarity index 75% rename from clientconn_state_transition_test.go rename to test/clientconn_state_transition_test.go index 0c58131a1c6f..a14ff4588a0f 100644 --- a/clientconn_state_transition_test.go +++ b/test/clientconn_state_transition_test.go @@ -16,24 +16,28 @@ * */ -package grpc +package test import ( "context" + "fmt" "net" "sync" "testing" "time" "golang.org/x/net/http2" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" ) -const stateRecordingBalancerName = "state_recoding_balancer" +const stateRecordingBalancerName = "state_recording_balancer" var testBalancerBuilder = newStateRecordingBalancerBuilder() @@ -75,7 +79,7 @@ func (s) TestStateTransitions_SingleAddress(t *testing.T) { }, }, { - desc: "When the connection is closed, the client enters TRANSIENT FAILURE.", + desc: "When the connection is closed before the preface is sent, the client enters TRANSIENT FAILURE.", want: []connectivity.State{ connectivity.Connecting, connectivity.TransientFailure, @@ -141,9 +145,6 @@ client enters TRANSIENT FAILURE.`, } func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, server func(net.Listener) net.Conn) { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - pl := 
testutils.NewPipeListener() defer pl.Close() @@ -156,25 +157,27 @@ func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, s connMu.Unlock() }() - client, err := DialContext(ctx, - "", - WithInsecure(), - WithBalancerName(stateRecordingBalancerName), - WithDialer(pl.Dialer()), - withBackoff(noBackoff{}), - withMinConnectDeadline(func() time.Duration { return time.Millisecond * 100 })) + client, err := grpc.Dial("", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), + grpc.WithDialer(pl.Dialer()), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{}, + MinConnectTimeout: 100 * time.Millisecond, + })) if err != nil { t.Fatal(err) } defer client.Close() - stateNotifications := testBalancerBuilder.nextStateNotifier() - - timeout := time.After(5 * time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + go stayConnected(ctx, client) + stateNotifications := testBalancerBuilder.nextStateNotifier() for i := 0; i < len(want); i++ { select { - case <-timeout: + case <-time.After(defaultTestTimeout): t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want) case seen := <-stateNotifications: if seen != want[i] { @@ -193,24 +196,16 @@ func testStateTransitionSingleAddress(t *testing.T, want []connectivity.State, s } } -// When a READY connection is closed, the client enters CONNECTING. +// When a READY connection is closed, the client enters IDLE then CONNECTING. func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { - want := []connectivity.State{ - connectivity.Connecting, - connectivity.Ready, - connectivity.Connecting, - } - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. 
Err: %v", err) } defer lis.Close() - sawReady := make(chan struct{}) + sawReady := make(chan struct{}, 1) + defer close(sawReady) // Launch the server. go func() { @@ -234,23 +229,33 @@ func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { conn.Close() }() - client, err := DialContext(ctx, lis.Addr().String(), WithInsecure(), WithBalancerName(stateRecordingBalancerName)) + client, err := grpc.Dial(lis.Addr().String(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName))) if err != nil { t.Fatal(err) } defer client.Close() - stateNotifications := testBalancerBuilder.nextStateNotifier() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + go stayConnected(ctx, client) - timeout := time.After(5 * time.Second) + stateNotifications := testBalancerBuilder.nextStateNotifier() + want := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + connectivity.Idle, + connectivity.Connecting, + } for i := 0; i < len(want); i++ { select { - case <-timeout: + case <-time.After(defaultTestTimeout): t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want) case seen := <-stateNotifications: if seen == connectivity.Ready { - close(sawReady) + sawReady <- struct{}{} } if seen != want[i] { t.Fatalf("expected to see %v at position %d in flow %v, got %v", want[i], i, want, seen) @@ -262,14 +267,6 @@ func (s) TestStateTransitions_ReadyToConnecting(t *testing.T) { // When the first connection is closed, the client stays in CONNECTING until it // tries the second address (which succeeds, and then it enters READY). 
func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) { - want := []connectivity.State{ - connectivity.Connecting, - connectivity.Ready, - } - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - lis1, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. Err: %v", err) @@ -320,19 +317,25 @@ func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) {Addr: lis1.Addr().String()}, {Addr: lis2.Addr().String()}, }}) - client, err := DialContext(ctx, "whatever:///this-gets-overwritten", WithInsecure(), WithBalancerName(stateRecordingBalancerName), WithResolvers(rb)) + client, err := grpc.Dial("whatever:///this-gets-overwritten", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), + grpc.WithResolvers(rb)) if err != nil { t.Fatal(err) } defer client.Close() stateNotifications := testBalancerBuilder.nextStateNotifier() - - timeout := time.After(5 * time.Second) - + want := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() for i := 0; i < len(want); i++ { select { - case <-timeout: + case <-ctx.Done(): t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want) case seen := <-stateNotifications: if seen != want[i] { @@ -341,12 +344,12 @@ func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) } } select { - case <-timeout: + case <-ctx.Done(): t.Fatal("saw the correct state transitions, but timed out waiting for client to finish interactions with server 1") case <-server1Done: } select { - case <-timeout: + case <-ctx.Done(): t.Fatal("saw the correct state transitions, but timed out waiting for client to finish interactions with server 2") case <-server2Done: } @@ -355,15 
+358,6 @@ func (s) TestStateTransitions_TriesAllAddrsBeforeTransientFailure(t *testing.T) // When there are multiple addresses, and we enter READY on one of them, a // later closure should cause the client to enter CONNECTING func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { - want := []connectivity.State{ - connectivity.Connecting, - connectivity.Ready, - connectivity.Connecting, - } - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - lis1, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Error while listening. Err: %v", err) @@ -378,7 +372,8 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { defer lis2.Close() server1Done := make(chan struct{}) - sawReady := make(chan struct{}) + sawReady := make(chan struct{}, 1) + defer close(sawReady) // Launch server 1. go func() { @@ -400,12 +395,6 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { conn.Close() - _, err = lis1.Accept() - if err != nil { - t.Error(err) - return - } - close(server1Done) }() @@ -414,23 +403,33 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { {Addr: lis1.Addr().String()}, {Addr: lis2.Addr().String()}, }}) - client, err := DialContext(ctx, "whatever:///this-gets-overwritten", WithInsecure(), WithBalancerName(stateRecordingBalancerName), WithResolvers(rb)) + client, err := grpc.Dial("whatever:///this-gets-overwritten", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, stateRecordingBalancerName)), + grpc.WithResolvers(rb)) if err != nil { t.Fatal(err) } defer client.Close() - stateNotifications := testBalancerBuilder.nextStateNotifier() - - timeout := time.After(2 * time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + go stayConnected(ctx, client) + stateNotifications := 
testBalancerBuilder.nextStateNotifier() + want := []connectivity.State{ + connectivity.Connecting, + connectivity.Ready, + connectivity.Idle, + connectivity.Connecting, + } for i := 0; i < len(want); i++ { select { - case <-timeout: + case <-ctx.Done(): t.Fatalf("timed out waiting for state %d (%v) in flow %v", i, want[i], want) case seen := <-stateNotifications: if seen == connectivity.Ready { - close(sawReady) + sawReady <- struct{}{} } if seen != want[i] { t.Fatalf("expected to see %v at position %d in flow %v, got %v", want[i], i, want, seen) @@ -438,7 +437,7 @@ func (s) TestStateTransitions_MultipleAddrsEntersReady(t *testing.T) { } } select { - case <-timeout: + case <-ctx.Done(): t.Fatal("saw the correct state transitions, but timed out waiting for client to finish interactions with server 1") case <-server1Done: } @@ -482,7 +481,7 @@ func (b *stateRecordingBalancerBuilder) Build(cc balancer.ClientConn, opts balan b.mu.Unlock() return &stateRecordingBalancer{ notifier: stateNotifications, - Balancer: balancer.Get(PickFirstBalancerName).Build(cc, opts), + Balancer: balancer.Get("pick_first").Build(cc, opts), } } @@ -494,10 +493,6 @@ func (b *stateRecordingBalancerBuilder) nextStateNotifier() <-chan connectivity. return ret } -type noBackoff struct{} - -func (b noBackoff) Backoff(int) time.Duration { return time.Duration(0) } - // Keep reading until something causes the connection to die (EOF, server // closed, etc). Useful as a tool for mindlessly keeping the connection // healthy, since the client will error if things like client prefaces are not @@ -507,3 +502,45 @@ func keepReading(conn net.Conn) { for _, err := conn.Read(buf); err == nil; _, err = conn.Read(buf) { } } + +// stayConnected makes cc stay connected by repeatedly calling cc.Connect() +// until the state becomes Shutdown or until the context expires. 
+func stayConnected(ctx context.Context, cc *grpc.ClientConn) { + for { + state := cc.GetState() + switch state { + case connectivity.Idle: + cc.Connect() + case connectivity.Shutdown: + return + } + if !cc.WaitForStateChange(ctx, state) { + return + } + } +} + +func awaitState(ctx context.Context, t *testing.T, cc *grpc.ClientConn, stateWant connectivity.State) { + t.Helper() + for state := cc.GetState(); state != stateWant; state = cc.GetState() { + if !cc.WaitForStateChange(ctx, state) { + t.Fatalf("timed out waiting for state change. got %v; want %v", state, stateWant) + } + } +} + +func awaitNotState(ctx context.Context, t *testing.T, cc *grpc.ClientConn, stateDoNotWant connectivity.State) { + t.Helper() + for state := cc.GetState(); state == stateDoNotWant; state = cc.GetState() { + if !cc.WaitForStateChange(ctx, state) { + t.Fatalf("timed out waiting for state change. got %v; want NOT %v", state, stateDoNotWant) + } + } +} + +func awaitNoStateChange(ctx context.Context, t *testing.T, cc *grpc.ClientConn, currState connectivity.State) { + t.Helper() + if cc.WaitForStateChange(ctx, currState) { + t.Fatalf("State changed from %q to %q when no state change was expected", currState, cc.GetState()) + } +} diff --git a/test/clientconn_test.go b/test/clientconn_test.go new file mode 100644 index 000000000000..bdbe81d03040 --- /dev/null +++ b/test/clientconn_test.go @@ -0,0 +1,89 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/channelz" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" +) + +// TestClientConnClose_WithPendingRPC tests the scenario where the channel has +// not yet received any update from the name resolver and hence RPCs are +// blocking. The test verifies that closing the ClientConn unblocks the RPC with +// the expected error code. +func (s) TestClientConnClose_WithPendingRPC(t *testing.T) { + // Initialize channelz. Used to determine pending RPC count. + czCleanup := channelz.NewChannelzStorageForTesting() + defer czCleanupWrapper(czCleanup, t) + + r := manual.NewBuilderWithScheme("whatever") + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + client := testgrpc.NewTestServiceClient(cc) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + doneErrCh := make(chan error, 1) + go func() { + // This RPC would block until the ClientConn is closed, because the + // resolver has not provided its first update yet. + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if status.Code(err) != codes.Canceled || !strings.Contains(err.Error(), "client connection is closing") { + doneErrCh <- fmt.Errorf("EmptyCall() = %v, want %s", err, codes.Canceled) + } + doneErrCh <- nil + }() + + // Make sure that there is one pending RPC on the ClientConn before attempting + // to close it. 
If we don't do this, cc.Close() can happen before the above + // goroutine gets to make the RPC. + for { + if err := ctx.Err(); err != nil { + t.Fatal(err) + } + tcs, _ := channelz.GetTopChannels(0, 0) + if len(tcs) != 1 { + t.Fatalf("there should only be one top channel, not %d", len(tcs)) + } + started := tcs[0].ChannelData.CallsStarted + completed := tcs[0].ChannelData.CallsSucceeded + tcs[0].ChannelData.CallsFailed + if (started - completed) == 1 { + break + } + time.Sleep(defaultTestShortTimeout) + } + cc.Close() + if err := <-doneErrCh; err != nil { + t.Fatal(err) + } +} diff --git a/test/clienttester.go b/test/clienttester.go new file mode 100644 index 000000000000..7e223091164d --- /dev/null +++ b/test/clienttester.go @@ -0,0 +1,109 @@ +/* + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package test + +import ( + "bytes" + "io" + "net" + "testing" + + "golang.org/x/net/http2" +) + +var ( + clientPreface = []byte(http2.ClientPreface) +) + +func newClientTester(t *testing.T, conn net.Conn) *clientTester { + ct := &clientTester{ + t: t, + conn: conn, + } + ct.fr = http2.NewFramer(conn, conn) + ct.greet() + return ct +} + +type clientTester struct { + t *testing.T + conn net.Conn + fr *http2.Framer +} + +// greet() performs the necessary steps for http2 connection establishment on +// the server side. 
+func (ct *clientTester) greet() { + ct.wantClientPreface() + ct.wantSettingsFrame() + ct.writeSettingsFrame() + ct.writeSettingsAck() + + for { + f, err := ct.fr.ReadFrame() + if err != nil { + ct.t.Errorf("error reading frame from client side: %v", err) + } + switch f := f.(type) { + case *http2.SettingsFrame: + if f.IsAck() { // HTTP/2 handshake completed. + return + } + default: + ct.t.Errorf("during greet, unexpected frame type %T", f) + } + } +} + +func (ct *clientTester) wantClientPreface() { + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(ct.conn, preface); err != nil { + ct.t.Errorf("Error at server-side while reading preface from client. Err: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + ct.t.Errorf("received bogus greeting from client %q", preface) + } +} + +func (ct *clientTester) wantSettingsFrame() { + frame, err := ct.fr.ReadFrame() + if err != nil { + ct.t.Errorf("error reading initial settings frame from client: %v", err) + } + _, ok := frame.(*http2.SettingsFrame) + if !ok { + ct.t.Errorf("initial frame sent from client is not a settings frame, type %T", frame) + } +} + +func (ct *clientTester) writeSettingsFrame() { + if err := ct.fr.WriteSettings(); err != nil { + ct.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) + } +} + +func (ct *clientTester) writeSettingsAck() { + if err := ct.fr.WriteSettingsAck(); err != nil { + ct.t.Fatalf("Error writing ACK of client's SETTINGS: %v", err) + } +} + +func (ct *clientTester) writeGoAway(maxStreamID uint32, code http2.ErrCode, debugData []byte) { + if err := ct.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { + ct.t.Fatalf("Error writing GOAWAY: %v", err) + } +} diff --git a/test/codec_perf/perf.pb.go b/test/codec_perf/perf.pb.go index d09ce726b63f..c166c79e3db9 100644 --- a/test/codec_perf/perf.pb.go +++ b/test/codec_perf/perf.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.30.0 +// protoc v4.22.0 // source: test/codec_perf/perf.proto package codec_perf import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Buffer is a message that contains a body of bytes that is used to exercise // encoding and decoding overheads. type Buffer struct { diff --git a/test/compressor_test.go b/test/compressor_test.go new file mode 100644 index 000000000000..89daa765eeff --- /dev/null +++ b/test/compressor_test.go @@ -0,0 +1,708 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "bytes" + "compress/gzip" + "context" + "io" + "reflect" + "strings" + "sync/atomic" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +func (s) TestCompressServerHasNoSupport(t *testing.T) { + for _, e := range listTestEnv() { + testCompressServerHasNoSupport(t, e) + } +} + +func testCompressServerHasNoSupport(t *testing.T, e env) { + te := newTest(t, e) + te.serverCompression = false + te.clientCompression = false + te.clientNopCompression = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testgrpc.NewTestServiceClient(te.clientConn()) + + const argSize = 271828 + const respSize = 314159 + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: respSize, + Payload: payload, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.Unimplemented { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %s", err, codes.Unimplemented) + } + // Streaming RPC + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Unimplemented { + t.Fatalf("%v.Recv() = %v, want error code %s", stream, err, codes.Unimplemented) + } +} + +func (s) TestCompressOK(t *testing.T) { + for _, e := range listTestEnv() { + testCompressOK(t, e) + } +} + +func 
testCompressOK(t *testing.T, e env) { + te := newTest(t, e) + te.serverCompression = true + te.clientCompression = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testgrpc.NewTestServiceClient(te.clientConn()) + + // Unary call + const argSize = 271828 + const respSize = 314159 + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: respSize, + Payload: payload, + } + ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + } + // Streaming RPC + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: 31415, + }, + } + payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + if err != nil { + t.Fatal(err) + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + stream.CloseSend() + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = %v, want ", stream, err) + } + if _, err := stream.Recv(); err != io.EOF { + t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err) + } +} + +func (s) TestIdentityEncoding(t *testing.T) { + for _, e := range listTestEnv() { + testIdentityEncoding(t, e) + } +} + +func testIdentityEncoding(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testgrpc.NewTestServiceClient(te.clientConn()) + + // Unary 
call + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 5) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: 10, + Payload: payload, + } + ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + } + // Streaming RPC + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := tc.FullDuplexCall(ctx, grpc.UseCompressor("identity")) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + if err != nil { + t.Fatal(err) + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: []*testpb.ResponseParameters{{Size: 10}}, + Payload: payload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + stream.CloseSend() + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = %v, want ", stream, err) + } + if _, err := stream.Recv(); err != io.EOF { + t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err) + } +} + +// renameCompressor is a grpc.Compressor wrapper that allows customizing the +// Type() of another compressor. +type renameCompressor struct { + grpc.Compressor + name string +} + +func (r *renameCompressor) Type() string { return r.name } + +// renameDecompressor is a grpc.Decompressor wrapper that allows customizing the +// Type() of another Decompressor. 
+type renameDecompressor struct { + grpc.Decompressor + name string +} + +func (r *renameDecompressor) Type() string { return r.name } + +func (s) TestClientForwardsGrpcAcceptEncodingHeader(t *testing.T) { + wantGrpcAcceptEncodingCh := make(chan []string, 1) + defer close(wantGrpcAcceptEncodingCh) + + compressor := renameCompressor{Compressor: grpc.NewGZIPCompressor(), name: "testgzip"} + decompressor := renameDecompressor{Decompressor: grpc.NewGZIPDecompressor(), name: "testgzip"} + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.Internal, "no metadata in context") + } + if got, want := md["grpc-accept-encoding"], <-wantGrpcAcceptEncodingCh; !reflect.DeepEqual(got, want) { + return nil, status.Errorf(codes.Internal, "got grpc-accept-encoding=%q; want [%q]", got, want) + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start([]grpc.ServerOption{grpc.RPCDecompressor(&decompressor)}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + wantGrpcAcceptEncodingCh <- []string{"gzip"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + wantGrpcAcceptEncodingCh <- []string{"gzip"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.UseCompressor("gzip")); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + // Use compressor directly which is not registered via + // encoding.RegisterCompressor. 
+ if err := ss.StartClient(grpc.WithCompressor(&compressor)); err != nil { + t.Fatalf("Error starting client: %v", err) + } + wantGrpcAcceptEncodingCh <- []string{"gzip,testgzip"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } +} + +// wrapCompressor is a wrapper of encoding.Compressor which maintains count of +// Compressor method invokes. +type wrapCompressor struct { + encoding.Compressor + compressInvokes int32 +} + +func (wc *wrapCompressor) Compress(w io.Writer) (io.WriteCloser, error) { + atomic.AddInt32(&wc.compressInvokes, 1) + return wc.Compressor.Compress(w) +} + +func setupGzipWrapCompressor(t *testing.T) *wrapCompressor { + oldC := encoding.GetCompressor("gzip") + c := &wrapCompressor{Compressor: oldC} + encoding.RegisterCompressor(c) + t.Cleanup(func() { + encoding.RegisterCompressor(oldC) + }) + return c +} + +func (s) TestSetSendCompressorSuccess(t *testing.T) { + for _, tt := range []struct { + name string + desc string + dialOpts []grpc.DialOption + resCompressor string + wantCompressInvokes int32 + }{ + { + name: "identity_request_and_gzip_response", + desc: "request is uncompressed and response is gzip compressed", + resCompressor: "gzip", + wantCompressInvokes: 1, + }, + { + name: "gzip_request_and_identity_response", + desc: "request is gzip compressed and response is uncompressed with identity", + resCompressor: "identity", + dialOpts: []grpc.DialOption{ + // Use WithCompressor instead of UseCompressor to avoid counting + // the client's compressor usage. 
+ grpc.WithCompressor(grpc.NewGZIPCompressor()), + }, + wantCompressInvokes: 0, + }, + } { + t.Run(tt.name, func(t *testing.T) { + t.Run("unary", func(t *testing.T) { + testUnarySetSendCompressorSuccess(t, tt.resCompressor, tt.wantCompressInvokes, tt.dialOpts) + }) + + t.Run("stream", func(t *testing.T) { + testStreamSetSendCompressorSuccess(t, tt.resCompressor, tt.wantCompressInvokes, tt.dialOpts) + }) + }) + } +} + +func testUnarySetSendCompressorSuccess(t *testing.T, resCompressor string, wantCompressInvokes int32, dialOpts []grpc.DialOption) { + wc := setupGzipWrapCompressor(t) + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if err := grpc.SetSendCompressor(ctx, resCompressor); err != nil { + return nil, err + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(nil, dialOpts...); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("Unexpected unary call error, got: %v, want: nil", err) + } + + compressInvokes := atomic.LoadInt32(&wc.compressInvokes) + if compressInvokes != wantCompressInvokes { + t.Fatalf("Unexpected compress invokes, got:%d, want: %d", compressInvokes, wantCompressInvokes) + } +} + +func testStreamSetSendCompressorSuccess(t *testing.T, resCompressor string, wantCompressInvokes int32, dialOpts []grpc.DialOption) { + wc := setupGzipWrapCompressor(t) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + + if err := grpc.SetSendCompressor(stream.Context(), resCompressor); err != nil { + return err + } + + return stream.Send(&testpb.StreamingOutputCallResponse{}) + }, + } + if err := ss.Start(nil, dialOpts...); err != nil { + 
t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unexpected full duplex call error, got: %v, want: nil", err) + } + + if err := s.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("Unexpected full duplex call send error, got: %v, want: nil", err) + } + + if _, err := s.Recv(); err != nil { + t.Fatalf("Unexpected full duplex recv error, got: %v, want: nil", err) + } + + compressInvokes := atomic.LoadInt32(&wc.compressInvokes) + if compressInvokes != wantCompressInvokes { + t.Fatalf("Unexpected compress invokes, got:%d, want: %d", compressInvokes, wantCompressInvokes) + } +} + +func (s) TestUnregisteredSetSendCompressorFailure(t *testing.T) { + resCompressor := "snappy2" + wantErr := status.Error(codes.Unknown, "unable to set send compressor: compressor not registered \"snappy2\"") + + t.Run("unary", func(t *testing.T) { + testUnarySetSendCompressorFailure(t, resCompressor, wantErr) + }) + + t.Run("stream", func(t *testing.T) { + testStreamSetSendCompressorFailure(t, resCompressor, wantErr) + }) +} + +func (s) TestUnadvertisedSetSendCompressorFailure(t *testing.T) { + // Disable client compressor advertisement. 
+ defer func(b bool) { envconfig.AdvertiseCompressors = b }(envconfig.AdvertiseCompressors) + envconfig.AdvertiseCompressors = false + + resCompressor := "gzip" + wantErr := status.Error(codes.Unknown, "unable to set send compressor: client does not support compressor \"gzip\"") + + t.Run("unary", func(t *testing.T) { + testUnarySetSendCompressorFailure(t, resCompressor, wantErr) + }) + + t.Run("stream", func(t *testing.T) { + testStreamSetSendCompressorFailure(t, resCompressor, wantErr) + }) +} + +func testUnarySetSendCompressorFailure(t *testing.T, resCompressor string, wantErr error) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + if err := grpc.SetSendCompressor(ctx, resCompressor); err != nil { + return nil, err + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !equalError(err, wantErr) { + t.Fatalf("Unexpected unary call error, got: %v, want: %v", err, wantErr) + } +} + +func testStreamSetSendCompressorFailure(t *testing.T, resCompressor string, wantErr error) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + + if err := grpc.SetSendCompressor(stream.Context(), resCompressor); err != nil { + return err + } + + return stream.Send(&testpb.StreamingOutputCallResponse{}) + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v, want: nil", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unexpected full duplex call 
error, got: %v, want: nil", err) + } + + if err := s.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("Unexpected full duplex call send error, got: %v, want: nil", err) + } + + if _, err := s.Recv(); !equalError(err, wantErr) { + t.Fatalf("Unexpected full duplex recv error, got: %v, want: %v", err, wantErr) + } +} + +func (s) TestUnarySetSendCompressorAfterHeaderSendFailure(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + // Send headers early and then set send compressor. + grpc.SendHeader(ctx, metadata.MD{}) + err := grpc.SetSendCompressor(ctx, "gzip") + if err == nil { + t.Error("Wanted set send compressor error") + return &testpb.Empty{}, nil + } + return nil, err + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + wantErr := status.Error(codes.Unknown, "transport: set send compressor called after headers sent or stream done") + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !equalError(err, wantErr) { + t.Fatalf("Unexpected unary call error, got: %v, want: %v", err, wantErr) + } +} + +func (s) TestStreamSetSendCompressorAfterHeaderSendFailure(t *testing.T) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + // Send headers early and then set send compressor. 
+ grpc.SendHeader(stream.Context(), metadata.MD{}) + err := grpc.SetSendCompressor(stream.Context(), "gzip") + if err == nil { + t.Error("Wanted set send compressor error") + } + return err + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + wantErr := status.Error(codes.Unknown, "transport: set send compressor called after headers sent or stream done") + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Unexpected full duplex call error, got: %v, want: nil", err) + } + + if _, err := s.Recv(); !equalError(err, wantErr) { + t.Fatalf("Unexpected full duplex recv error, got: %v, want: %v", err, wantErr) + } +} + +func (s) TestClientSupportedCompressors(t *testing.T) { + for _, tt := range []struct { + desc string + ctx context.Context + want []string + }{ + { + desc: "No additional grpc-accept-encoding header", + ctx: context.Background(), + want: []string{"gzip"}, + }, + { + desc: "With additional grpc-accept-encoding header", + ctx: metadata.AppendToOutgoingContext(context.Background(), + "grpc-accept-encoding", "test-compressor-1", + "grpc-accept-encoding", "test-compressor-2", + ), + want: []string{"gzip", "test-compressor-1", "test-compressor-2"}, + }, + { + desc: "With additional empty grpc-accept-encoding header", + ctx: metadata.AppendToOutgoingContext(context.Background(), + "grpc-accept-encoding", "", + ), + want: []string{"gzip"}, + }, + } { + t.Run(tt.desc, func(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + got, err := grpc.ClientSupportedCompressors(ctx) + if err != nil { + return nil, err + } + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("unexpected client compressors got: %v, want: %v", got, tt.want) + } + + return &testpb.Empty{}, nil + }, + } + if err := 
ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v, want: nil", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(tt.ctx, defaultTestTimeout) + defer cancel() + + _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}) + if err != nil { + t.Fatalf("Unexpected unary call error, got: %v, want: nil", err) + } + }) + } +} + +func (s) TestCompressorRegister(t *testing.T) { + for _, e := range listTestEnv() { + testCompressorRegister(t, e) + } +} + +func testCompressorRegister(t *testing.T, e env) { + te := newTest(t, e) + te.clientCompression = false + te.serverCompression = false + te.clientUseCompression = true + + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + tc := testgrpc.NewTestServiceClient(te.clientConn()) + + // Unary call + const argSize = 271828 + const respSize = 314159 + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) + if err != nil { + t.Fatal(err) + } + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: respSize, + Payload: payload, + } + ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) + } + // Streaming RPC + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := tc.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam := []*testpb.ResponseParameters{ + { + Size: 31415, + }, + } + payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + if err != nil { + t.Fatal(err) + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: payload, + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); 
err != nil { + t.Fatalf("%v.Recv() = %v, want ", stream, err) + } +} + +type badGzipCompressor struct{} + +func (badGzipCompressor) Do(w io.Writer, p []byte) error { + buf := &bytes.Buffer{} + gzw := gzip.NewWriter(buf) + if _, err := gzw.Write(p); err != nil { + return err + } + err := gzw.Close() + bs := buf.Bytes() + if len(bs) >= 6 { + bs[len(bs)-6] ^= 1 // modify checksum at end by 1 byte + } + w.Write(bs) + return err +} + +func (badGzipCompressor) Type() string { + return "gzip" +} + +func (s) TestGzipBadChecksum(t *testing.T) { + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, _ *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + } + if err := ss.Start(nil, grpc.WithCompressor(badGzipCompressor{})); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + p, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1024)) + if err != nil { + t.Fatalf("Unexpected error from newPayload: %v", err) + } + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: p}); err == nil || + status.Code(err) != codes.Internal || + !strings.Contains(status.Convert(err).Message(), gzip.ErrChecksum.Error()) { + t.Errorf("ss.Client.UnaryCall(_) = _, %v\n\twant: _, status(codes.Internal, contains %q)", err, gzip.ErrChecksum) + } +} diff --git a/test/resolver_test.go b/test/config_selector_test.go similarity index 98% rename from test/resolver_test.go rename to test/config_selector_test.go index 648245aef9c3..422bf77c018e 100644 --- a/test/resolver_test.go +++ b/test/config_selector_test.go @@ -31,11 +31,11 @@ import ( "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/internal/testutils" + testpb "google.golang.org/grpc/interop/grpc_testing" "google.golang.org/grpc/metadata" 
"google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" ) type funcConfigSelector struct { @@ -145,7 +145,7 @@ func (s) TestConfigSelector(t *testing.T) { var gotInfo *iresolver.RPCInfo state := iresolver.SetConfigSelector(resolver.State{ Addresses: []resolver.Address{{Addr: ss.Address}}, - ServiceConfig: parseCfg(ss.R, "{}"), + ServiceConfig: parseServiceConfig(t, ss.R, "{}"), }, funcConfigSelector{ f: func(i iresolver.RPCInfo) (*iresolver.RPCConfig, error) { gotInfo = &i @@ -211,5 +211,4 @@ func (s) TestConfigSelector(t *testing.T) { } }) } - } diff --git a/test/context_canceled_test.go b/test/context_canceled_test.go index 96ee69d8d521..a4b3810e16dc 100644 --- a/test/context_canceled_test.go +++ b/test/context_canceled_test.go @@ -29,12 +29,14 @@ import ( "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestContextCanceled(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { stream.SetTrailer(metadata.New(map[string]string{"a": "b"})) return status.Error(codes.PermissionDenied, "perm denied") }, @@ -123,7 +125,7 @@ func (s) TestContextCanceled(t *testing.T) { // will be inconsistent, and it causes internal error. 
func (s) TestCancelWhileRecvingWithCompression(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for { if err := stream.Send(&testpb.StreamingOutputCallResponse{ Payload: nil, diff --git a/test/control_plane_status_test.go b/test/control_plane_status_test.go new file mode 100644 index 000000000000..087dd30dd670 --- /dev/null +++ b/test/control_plane_status_test.go @@ -0,0 +1,234 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/stub" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stubserver" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" +) + +func (s) TestConfigSelectorStatusCodes(t *testing.T) { + testCases := []struct { + name string + csErr error + want error + }{{ + name: "legal status code", + csErr: status.Errorf(codes.Unavailable, "this error is fine"), + want: status.Errorf(codes.Unavailable, "this error is fine"), + }, { + name: "illegal status code", + csErr: status.Errorf(codes.NotFound, "this error is bad"), + want: status.Errorf(codes.Internal, "this error is bad"), + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + ss.R = manual.NewBuilderWithScheme("confSel") + + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + state := iresolver.SetConfigSelector(resolver.State{ + Addresses: []resolver.Address{{Addr: ss.Address}}, + ServiceConfig: parseServiceConfig(t, ss.R, "{}"), + }, funcConfigSelector{ + f: func(i iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return nil, tc.csErr + }, + }) + ss.R.UpdateState(state) // Blocks until config selector is applied + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != status.Code(tc.want) || 
!strings.Contains(err.Error(), status.Convert(tc.want).Message()) { + t.Fatalf("client.EmptyCall(_, _) = _, %v; want _, %v", err, tc.want) + } + }) + } +} + +func (s) TestPickerStatusCodes(t *testing.T) { + testCases := []struct { + name string + pickerErr error + want error + }{{ + name: "legal status code", + pickerErr: status.Errorf(codes.Unavailable, "this error is fine"), + want: status.Errorf(codes.Unavailable, "this error is fine"), + }, { + name: "illegal status code", + pickerErr: status.Errorf(codes.NotFound, "this error is bad"), + want: status.Errorf(codes.Internal, "this error is bad"), + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + // Create a stub balancer that creates a picker that always returns + // an error. + sbf := stub.BalancerFuncs{ + UpdateClientConnState: func(d *stub.BalancerData, _ balancer.ClientConnState) error { + d.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(tc.pickerErr), + }) + return nil + }, + } + stub.Register("testPickerStatusCodesBalancer", sbf) + + ss.NewServiceConfig(`{"loadBalancingConfig": [{"testPickerStatusCodesBalancer":{}}] }`) + + // Make calls until pickerErr is received. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + var lastErr error + for ctx.Err() == nil { + if _, lastErr = ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(lastErr) == status.Code(tc.want) && strings.Contains(lastErr.Error(), status.Convert(tc.want).Message()) { + // Success! 
+ return + } + time.Sleep(time.Millisecond) + } + + t.Fatalf("client.EmptyCall(_, _) = _, %v; want _, %v", lastErr, tc.want) + }) + } +} + +func (s) TestCallCredsFromDialOptionsStatusCodes(t *testing.T) { + testCases := []struct { + name string + credsErr error + want error + }{{ + name: "legal status code", + credsErr: status.Errorf(codes.Unavailable, "this error is fine"), + want: status.Errorf(codes.Unavailable, "this error is fine"), + }, { + name: "illegal status code", + credsErr: status.Errorf(codes.NotFound, "this error is bad"), + want: status.Errorf(codes.Internal, "this error is bad"), + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + errChan := make(chan error, 1) + creds := &testPerRPCCredentials{errChan: errChan} + + if err := ss.Start(nil, grpc.WithPerRPCCredentials(creds)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + errChan <- tc.credsErr + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != status.Code(tc.want) || !strings.Contains(err.Error(), status.Convert(tc.want).Message()) { + t.Fatalf("client.EmptyCall(_, _) = _, %v; want _, %v", err, tc.want) + } + }) + } +} + +func (s) TestCallCredsFromCallOptionsStatusCodes(t *testing.T) { + testCases := []struct { + name string + credsErr error + want error + }{{ + name: "legal status code", + credsErr: status.Errorf(codes.Unavailable, "this error is fine"), + want: status.Errorf(codes.Unavailable, "this error is fine"), + }, { + name: "illegal status code", + credsErr: status.Errorf(codes.NotFound, "this error is bad"), + want: status.Errorf(codes.Internal, "this error is bad"), + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t 
*testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + + errChan := make(chan error, 1) + creds := &testPerRPCCredentials{errChan: errChan} + + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + errChan <- tc.credsErr + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(creds)); status.Code(err) != status.Code(tc.want) || !strings.Contains(err.Error(), status.Convert(tc.want).Message()) { + t.Fatalf("client.EmptyCall(_, _) = _, %v; want _, %v", err, tc.want) + } + }) + } +} diff --git a/test/creds_test.go b/test/creds_test.go index 6b3fc2a46076..06c716a3ee92 100644 --- a/test/creds_test.go +++ b/test/creds_test.go @@ -31,13 +31,16 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" - testpb "google.golang.org/grpc/test/grpc_testing" "google.golang.org/grpc/testdata" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) const ( @@ -52,7 +55,7 @@ type testCredsBundle struct { func (c *testCredsBundle) TransportCredentials() credentials.TransportCredentials { if c.mode == bundlePerRPCOnly { - return nil + return insecure.NewCredentials() } creds, err := credentials.NewClientTLSFromFile(testdata.Path("x509/server_ca_cert.pem"), "x.test.example.com") @@ -67,7 +70,7 @@ func (c *testCredsBundle) PerRPCCredentials() credentials.PerRPCCredentials { if c.mode == bundleTLSOnly { return nil } - return testPerRPCCredentials{} + return 
testPerRPCCredentials{authdata: authdata} } func (c *testCredsBundle) NewWithMode(mode string) (credentials.Bundle, error) { @@ -91,7 +94,7 @@ func (s) TestCredsBundleBoth(t *testing.T) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -115,7 +118,7 @@ func (s) TestCredsBundleTransportCredentials(t *testing.T) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -133,7 +136,7 @@ func (s) TestCredsBundlePerRPCCredentials(t *testing.T) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -169,7 +172,7 @@ func (s) TestNonFailFastRPCSucceedOnTimeoutCreds(t *testing.T) { defer te.tearDown() cc := te.clientConn(grpc.WithTransportCredentials(&clientTimeoutCreds{})) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() // This unary call should succeed, because ClientHandshake will succeed for the second time. 
@@ -195,9 +198,9 @@ func (s) TestGRPCMethodAccessibleToCredsViaContextRequestInfo(t *testing.T) { defer te.tearDown() cc := te.clientConn(grpc.WithPerRPCCredentials(&methodTestCreds{})) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Message() != wantMethod { t.Fatalf("ss.client.EmptyCall(_, _) = _, %v; want _, _.Message()=%q", err, wantMethod) @@ -230,7 +233,7 @@ func (s) TestFailFastRPCErrorOnBadCertificates(t *testing.T) { defer te.tearDown() opts := []grpc.DialOption{grpc.WithTransportCredentials(clientAlwaysFailCred{})} - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() cc, err := grpc.DialContext(ctx, te.srvAddr, opts...) if err != nil { @@ -238,7 +241,7 @@ func (s) TestFailFastRPCErrorOnBadCertificates(t *testing.T) { } defer cc.Close() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) for i := 0; i < 1000; i++ { // This loop runs for at most 1 second. The first several RPCs will fail // with Unavailable because the connection hasn't started. When the @@ -258,17 +261,15 @@ func (s) TestWaitForReadyRPCErrorOnBadCertificates(t *testing.T) { defer te.tearDown() opts := []grpc.DialOption{grpc.WithTransportCredentials(clientAlwaysFailCred{})} - dctx, dcancel := context.WithTimeout(context.Background(), 10*time.Second) - defer dcancel() - cc, err := grpc.DialContext(dctx, te.srvAddr, opts...) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, err := grpc.DialContext(ctx, te.srvAddr, opts...) 
if err != nil { t.Fatalf("Dial(_) = %v, want %v", err, nil) } defer cc.Close() - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() + tc := testgrpc.NewTestServiceClient(cc) if _, err = tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); strings.Contains(err.Error(), clientAlwaysFailCredErrorMsg) { return } @@ -283,10 +284,17 @@ var ( } ) -type testPerRPCCredentials struct{} +type testPerRPCCredentials struct { + authdata map[string]string + errChan chan error +} func (cr testPerRPCCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - return authdata, nil + var err error + if cr.errChan != nil { + err = <-cr.errChan + } + return cr.authdata, err } func (cr testPerRPCCredentials) RequireTransportSecurity() bool { @@ -319,12 +327,12 @@ func (s) TestPerRPCCredentialsViaDialOptions(t *testing.T) { func testPerRPCCredentialsViaDialOptions(t *testing.T, e env) { te := newTest(t, e) te.tapHandle = authHandle - te.perRPCCreds = testPerRPCCredentials{} + te.perRPCCreds = testPerRPCCredentials{authdata: authdata} te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -345,10 +353,10 @@ func testPerRPCCredentialsViaCallOptions(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil { + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{authdata: 
authdata})); err != nil { t.Fatalf("Test failed. Reason: %v", err) } } @@ -361,7 +369,7 @@ func (s) TestPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T) { func testPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T, e env) { te := newTest(t, e) - te.perRPCCreds = testPerRPCCredentials{} + te.perRPCCreds = testPerRPCCredentials{authdata: authdata} // When credentials are provided via both dial options and call options, // we apply both sets. te.tapHandle = func(ctx context.Context, _ *tap.Info) (context.Context, error) { @@ -387,10 +395,10 @@ func testPerRPCCredentialsViaDialOptionsAndCallOptions(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{})); err != nil { + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testPerRPCCredentials{authdata: authdata})); err != nil { t.Fatalf("Test failed. 
Reason: %v", err) } } @@ -434,17 +442,9 @@ func (s) TestCredsHandshakeAuthority(t *testing.T) { defer cc.Close() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - for { - s := cc.GetState() - if s == connectivity.Ready { - break - } - if !cc.WaitForStateChange(ctx, s) { - t.Fatalf("ClientConn is not ready after 100 ms") - } - } + awaitState(ctx, t, cc, connectivity.Ready) if cred.got != testAuthority { t.Fatalf("client creds got authority: %q, want: %q", cred.got, testAuthority) @@ -474,17 +474,9 @@ func (s) TestCredsHandshakeServerNameAuthority(t *testing.T) { defer cc.Close() r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String(), ServerName: testServerName}}}) - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - for { - s := cc.GetState() - if s == connectivity.Ready { - break - } - if !cc.WaitForStateChange(ctx, s) { - t.Fatalf("ClientConn is not ready after 100 ms") - } - } + awaitState(ctx, t, cc, connectivity.Ready) if cred.got != testServerName { t.Fatalf("client creds got authority: %q, want: %q", cred.got, testAuthority) diff --git a/test/end2end_test.go b/test/end2end_test.go index 902e94241048..865285b35a2d 100644 --- a/test/end2end_test.go +++ b/test/end2end_test.go @@ -21,7 +21,6 @@ package test import ( "bufio" "bytes" - "compress/gzip" "context" "crypto/tls" "errors" @@ -42,19 +41,17 @@ import ( "time" "github.com/golang/protobuf/proto" - anypb "github.com/golang/protobuf/ptypes/any" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" 
"google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding" - _ "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/health" - healthgrpc "google.golang.org/grpc/health/grpc_health_v1" - healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" @@ -65,12 +62,20 @@ import ( "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" - "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" - testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/test/bufconn" "google.golang.org/grpc/testdata" + + anypb "github.com/golang/protobuf/ptypes/any" + spb "google.golang.org/genproto/googleapis/rpc/status" + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + + _ "google.golang.org/grpc/encoding/gzip" ) const defaultHealthService = "grpc.health.v1.Health" @@ -127,7 +132,7 @@ var ( var raceMode bool // set by race.go in race mode type testServer struct { - testpb.UnimplementedTestServiceServer + testgrpc.UnimplementedTestServiceServer security string // indicate the authentication protocol used by this server. earlyFail bool // whether to error out the execution of a service handler prematurely. 
@@ -160,8 +165,6 @@ func newPayload(t testpb.PayloadType, size int32) (*testpb.Payload, error) { body := make([]byte, size) switch t { case testpb.PayloadType_COMPRESSABLE: - case testpb.PayloadType_UNCOMPRESSABLE: - return nil, fmt.Errorf("PayloadType UNCOMPRESSABLE is not supported") default: return nil, fmt.Errorf("unsupported payload type: %d", t) } @@ -242,7 +245,7 @@ func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (* }, nil } -func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { +func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error { if md, ok := metadata.FromIncomingContext(stream.Context()); ok { if _, exists := md[":authority"]; !exists { return status.Errorf(codes.DataLoss, "expected an :authority metadata: %v", md) @@ -273,7 +276,7 @@ func (s *testServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest return nil } -func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { +func (s *testServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error { var sum int for { in, err := stream.Recv() @@ -293,7 +296,7 @@ func (s *testServer) StreamingInputCall(stream testpb.TestService_StreamingInput } } -func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (s *testServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { md, ok := metadata.FromIncomingContext(stream.Context()) if ok { if s.setAndSendHeader { @@ -357,7 +360,7 @@ func (s *testServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServ } } -func (s *testServer) HalfDuplexCall(stream testpb.TestService_HalfDuplexCallServer) error { +func (s *testServer) HalfDuplexCall(stream testgrpc.TestService_HalfDuplexCallServer) error { var msgBuf 
[]*testpb.StreamingOutputCallRequest for { in, err := stream.Recv() @@ -467,7 +470,7 @@ type test struct { // expose the server's health using the default health service // implementation. This should only be used when a non-default health service // implementation is required. - healthServer healthpb.HealthServer + healthServer healthgrpc.HealthServer maxStream uint32 tapHandle tap.ServerInHandle maxServerMsgSize *int @@ -506,19 +509,15 @@ type test struct { customDialOptions []grpc.DialOption resolverScheme string - // All test dialing is blocking by default. Set this to true if dial - // should be non-blocking. - nonBlockingDial bool - // These are are set once startServer is called. The common case is to have // only one testServer. srv stopper - hSrv healthpb.HealthServer + hSrv healthgrpc.HealthServer srvAddr string // These are are set once startServers is called. srvs []stopper - hSrvs []healthpb.HealthServer + hSrvs []healthgrpc.HealthServer srvAddrs []string cc *grpc.ClientConn // nil until requested via clientConn @@ -567,7 +566,7 @@ func newTest(t *testing.T, e env) *test { return te } -func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener { +func (te *test) listenAndServe(ts testgrpc.TestServiceServer, listen func(network, address string) (net.Listener, error)) net.Listener { te.t.Helper() te.t.Logf("Running test in %s environment...", te.e.name) sopts := []grpc.ServerOption{grpc.MaxConcurrentStreams(te.maxStream)} @@ -627,7 +626,7 @@ func (te *test) listenAndServe(ts testpb.TestServiceServer, listen func(network, sopts = append(sopts, te.customServerOptions...) s := grpc.NewServer(sopts...) 
if ts != nil { - testpb.RegisterTestServiceServer(s, ts) + testgrpc.RegisterTestServiceServer(s, ts) } // Create a new default health server if enableHealthServer is set, or use @@ -692,20 +691,20 @@ func (w wrapHS) Stop() { w.s.Close() } -func (te *test) startServerWithConnControl(ts testpb.TestServiceServer) *listenerWrapper { +func (te *test) startServerWithConnControl(ts testgrpc.TestServiceServer) *listenerWrapper { l := te.listenAndServe(ts, listenWithConnControl) return l.(*listenerWrapper) } // startServer starts a gRPC server exposing the provided TestService // implementation. Callers should defer a call to te.tearDown to clean up -func (te *test) startServer(ts testpb.TestServiceServer) { +func (te *test) startServer(ts testgrpc.TestServiceServer) { te.t.Helper() te.listenAndServe(ts, net.Listen) } // startServers starts 'num' gRPC servers exposing the provided TestService. -func (te *test) startServers(ts testpb.TestServiceServer, num int) { +func (te *test) startServers(ts testgrpc.TestServiceServer, num int) { for i := 0; i < num; i++ { te.startServer(ts) te.srvs = append(te.srvs, te.srv.(*grpc.Server)) @@ -802,7 +801,7 @@ func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) case "empty": // Don't add any transport creds option. default: - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } // TODO(bar) switch balancer case "pick_first". 
var scheme string @@ -812,7 +811,7 @@ func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) scheme = te.resolverScheme + ":///" } if te.e.balancer != "" { - opts = append(opts, grpc.WithBalancerName(te.e.balancer)) + opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, te.e.balancer))) } if te.clientInitialWindowSize > 0 { opts = append(opts, grpc.WithInitialWindowSize(te.clientInitialWindowSize)) @@ -826,10 +825,6 @@ func (te *test) configDial(opts ...grpc.DialOption) ([]grpc.DialOption, string) if te.customCodec != nil { opts = append(opts, grpc.WithDefaultCallOptions(grpc.ForceCodec(te.customCodec))) } - if !te.nonBlockingDial && te.srvAddr != "" { - // Only do a blocking dial if server is up. - opts = append(opts, grpc.WithBlock()) - } if te.srvAddr == "" { te.srvAddr = "client.side.only.test" } @@ -893,6 +888,32 @@ type lazyConn struct { beLazy int32 } +// possible conn closed errors. +const possibleConnResetMsg = "connection reset by peer" +const possibleEOFMsg = "error reading from server: EOF" + +// isConnClosedErr checks the error msg for possible conn closed messages. There +// is a raciness in the timing of when TCP packets are sent from client to +// server, and when we tell the server to stop, so we need to check for both of +// these possible error messages: +// 1. If the call to ss.S.Stop() causes the server's sockets to close while +// there's still in-flight data from the client on the TCP connection, then +// the kernel can send an RST back to the client (also see +// https://stackoverflow.com/questions/33053507/econnreset-in-send-linux-c). +// Note that while this condition is expected to be rare due to the +// test httpServer start synchronization, in theory it should be possible, +// e.g. if the client sends a BDP ping at the right time. +// 2. 
If, for example, the call to ss.S.Stop() happens after the RPC headers +// have been received at the server, then the TCP connection can shutdown +// gracefully when the server's socket closes. +// 3. If there is an actual io.EOF received because the client stopped the stream. +func isConnClosedErr(err error) bool { + errContainsConnResetMsg := strings.Contains(err.Error(), possibleConnResetMsg) + errContainsEOFMsg := strings.Contains(err.Error(), possibleEOFMsg) + + return errContainsConnResetMsg || errContainsEOFMsg || err == io.EOF +} + func (l *lazyConn) Write(b []byte) (int, error) { if atomic.LoadInt32(&(l.beLazy)) == 1 { time.Sleep(time.Second) @@ -917,7 +938,7 @@ func (s) TestContextDeadlineNotIgnored(t *testing.T) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) @@ -944,38 +965,25 @@ func (s) TestTimeoutOnDeadServer(t *testing.T) { func testTimeoutOnDeadServer(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } + // Wait for the client to report READY, stop the server, 
then wait for the + // client to notice the connection is gone. + awaitState(ctx, t, cc, connectivity.Ready) te.srv.Stop() - cancel() - - // Wait for the client to notice the connection is gone. - ctx, cancel = context.WithTimeout(context.Background(), 500*time.Millisecond) - state := cc.GetState() - for ; state == connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() { - } - cancel() - if state == connectivity.Ready { - t.Fatalf("Timed out waiting for non-ready state") - } - ctx, cancel = context.WithTimeout(context.Background(), time.Millisecond) + awaitNotState(ctx, t, cc, connectivity.Ready) + ctx, cancel = context.WithTimeout(ctx, 5*time.Millisecond) _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) cancel() - if e.balancer != "" && status.Code(err) != codes.DeadlineExceeded { - // If e.balancer == nil, the ac will stop reconnecting because the dialer returns non-temp error, - // the error will be an internal error. + if status.Code(err) != codes.DeadlineExceeded { t.Fatalf("TestService/EmptyCall(%v, _) = _, %v, want _, error code: %s", ctx, err, codes.DeadlineExceeded) } awaitNewConnLogOutput() @@ -1001,489 +1009,153 @@ func testServerGracefulStopIdempotent(t *testing.T, e env) { } } -func (s) TestServerGoAway(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testServerGoAway(t, e) - } -} - -func testServerGoAway(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - // Finish an RPC to make sure the connection is good. 
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) - } - ch := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(ch) - }() - // Loop until the server side GoAway signal is propagated to the client. - for { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) != codes.DeadlineExceeded { - cancel() - break - } - cancel() - } - // A new RPC should fail. - ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable && status.Code(err) != codes.Internal { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal) +func (s) TestDetailedConnectionCloseErrorPropagatesToRpcError(t *testing.T) { + rpcStartedOnServer := make(chan struct{}) + rpcDoneOnClient := make(chan struct{}) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + close(rpcStartedOnServer) + <-rpcDoneOnClient + return status.Error(codes.Internal, "arbitrary status") + }, } - <-ch - awaitNewConnLogOutput() -} - -func (s) TestServerGoAwayPendingRPC(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testServerGoAwayPendingRPC(t, e) + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) } -} + defer ss.Stop() -func testServerGoAwayPendingRPC(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the 
connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - // Finish an RPC to make sure the connection is good. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) - } - ch := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(ch) - }() - // Loop until the server side GoAway signal is propagated to the client. - start := time.Now() - errored := false - for time.Since(start) < time.Second { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) - cancel() - if err != nil { - errored = true - break - } - } - if !errored { - t.Fatalf("GoAway never received by client") - } - respParam := []*testpb.ResponseParameters{{Size: 1}} - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) + // Start an RPC. Then, while the RPC is still being accepted or handled at the server, abruptly + // stop the server, killing the connection. The RPC error message should include details about the specific + // connection error that was encountered. + stream, err := ss.Client.FullDuplexCall(ctx) if err != nil { - t.Fatal(err) - } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: payload, - } - // The existing RPC should be still good to proceed. 
- if err := stream.Send(req); err != nil { - t.Fatalf("%v.Send(_) = %v, want ", stream, err) + t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) } - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + // Block until the RPC has been started on the server. This ensures that the ClientConn will find a healthy + // connection for the RPC to go out on initially, and that the TCP connection will shut down strictly after + // the RPC has been started on it. + <-rpcStartedOnServer + ss.S.Stop() + // The precise behavior of this test is subject to raciness around the timing + // of when TCP packets are sent from client to server, and when we tell the + // server to stop, so we need to account for both possible error messages. + if _, err := stream.Recv(); err == io.EOF || !isConnClosedErr(err) { + t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q OR %q", stream, err, possibleConnResetMsg, possibleEOFMsg) } - // The RPC will run until canceled. 
- cancel() - <-ch - awaitNewConnLogOutput() + close(rpcDoneOnClient) } -func (s) TestServerMultipleGoAwayPendingRPC(t *testing.T) { +func (s) TestFailFast(t *testing.T) { for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testServerMultipleGoAwayPendingRPC(t, e) + testFailFast(t, e) } } -func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { +func testFailFast(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithCancel(context.Background()) - stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - // Finish an RPC to make sure the connection is good. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + tc := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) } - ch1 := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(ch1) - }() - ch2 := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(ch2) - }() - // Loop until the server side GoAway signal is propagated to the client. + // Stop the server and tear down all the existing connections. + te.srv.Stop() + // Loop until the server teardown is propagated to the client. 
for { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - cancel() + if err := ctx.Err(); err != nil { + t.Fatalf("EmptyCall did not return UNAVAILABLE before timeout") + } + _, err := tc.EmptyCall(ctx, &testpb.Empty{}) + if status.Code(err) == codes.Unavailable { break } - cancel() - } - select { - case <-ch1: - t.Fatal("GracefulStop() terminated early") - case <-ch2: - t.Fatal("GracefulStop() terminated early") - default: - } - respParam := []*testpb.ResponseParameters{ - { - Size: 1, - }, - } - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) - if err != nil { - t.Fatal(err) - } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: payload, - } - // The existing RPC should be still good to proceed. - if err := stream.Send(req); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + t.Logf("%v.EmptyCall(_, _) = _, %v", tc, err) + time.Sleep(10 * time.Millisecond) } - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + // The client keeps reconnecting and ongoing fail-fast RPCs should fail with code.Unavailable. 
+ if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { + t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %s", err, codes.Unavailable) } - if err := stream.CloseSend(); err != nil { - t.Fatalf("%v.CloseSend() = %v, want ", stream, err) + if _, err := tc.StreamingInputCall(ctx); status.Code(err) != codes.Unavailable { + t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %s", err, codes.Unavailable) } - <-ch1 - <-ch2 - cancel() - awaitNewConnLogOutput() -} -func (s) TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testConcurrentClientConnCloseAndServerGoAway(t, e) - } + awaitNewConnLogOutput() } -func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) { +func testServiceConfigSetup(t *testing.T, e env) *test { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", + "Failed to dial : context canceled; please retry.", ) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() + return te +} - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) - } - ch := make(chan struct{}) - // Close ClientConn and Server concurrently. 
- go func() { - te.srv.GracefulStop() - close(ch) - }() - go func() { - cc.Close() - }() - <-ch +func newBool(b bool) (a *bool) { + return &b } -func (s) TestConcurrentServerStopAndGoAway(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testConcurrentServerStopAndGoAway(t, e) - } +func newInt(b int) (a *int) { + return &b } -func testConcurrentServerStopAndGoAway(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) - te.startServer(&testServer{security: e.security}) +func newDuration(b time.Duration) (a *time.Duration) { + a = new(time.Duration) + *a = b + return +} + +func (s) TestGetMethodConfig(t *testing.T) { + te := testServiceConfigSetup(t, tcpClearRREnv) defer te.tearDown() + r := manual.NewBuilderWithScheme("whatever") - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } + te.resolverScheme = r.Scheme() + cc := te.clientConn(grpc.WithResolvers(r)) + addrs := []resolver.Address{{Addr: te.srvAddr}} + r.UpdateState(resolver.State{ + Addresses: addrs, + ServiceConfig: parseServiceConfig(t, r, `{ + "methodConfig": [ + { + "name": [ + { + "service": "grpc.testing.TestService", + "method": "EmptyCall" + } + ], + "waitForReady": true, + "timeout": ".001s" + }, + { + "name": [ + { + "service": "grpc.testing.TestService" + } + ], + "waitForReady": false + } + ] +}`)}) - // Finish an RPC to make sure the connection is good. 
- if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) - } + tc := testgrpc.NewTestServiceClient(cc) - ch := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(ch) - }() - // Loop until the server side GoAway signal is propagated to the client. + // Make sure service config has been processed by grpc. for { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - cancel() + if cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { break } - cancel() - } - // Stop the server and close all the connections. - te.srv.Stop() - respParam := []*testpb.ResponseParameters{ - { - Size: 1, - }, - } - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) - if err != nil { - t.Fatal(err) - } - req := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: payload, - } - sendStart := time.Now() - for { - if err := stream.Send(req); err == io.EOF { - // stream.Send should eventually send io.EOF - break - } else if err != nil { - // Send should never return a transport-level error. 
- t.Fatalf("stream.Send(%v) = %v; want ", req, err) - } - if time.Since(sendStart) > 2*time.Second { - t.Fatalf("stream.Send(_) did not return io.EOF after 2s") - } - time.Sleep(time.Millisecond) - } - if _, err := stream.Recv(); err == nil || err == io.EOF { - t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) - } - <-ch - awaitNewConnLogOutput() -} - -func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - continue - } - testClientConnCloseAfterGoAwayWithActiveStream(t, e) - } -} - -func testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) { - te := newTest(t, e) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - if _, err := tc.FullDuplexCall(ctx); err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, ", tc, err) - } - done := make(chan struct{}) - go func() { - te.srv.GracefulStop() - close(done) - }() - time.Sleep(50 * time.Millisecond) - cc.Close() - timeout := time.NewTimer(time.Second) - select { - case <-done: - case <-timeout.C: - t.Fatalf("Test timed-out.") - } -} - -func (s) TestFailFast(t *testing.T) { - for _, e := range listTestEnv() { - testFailFast(t, e) - } -} - -func testFailFast(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if _, err 
:= tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) - } - // Stop the server and tear down all the existing connections. - te.srv.Stop() - // Loop until the server teardown is propagated to the client. - for { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - _, err := tc.EmptyCall(ctx, &testpb.Empty{}) - cancel() - if status.Code(err) == codes.Unavailable { - break - } - t.Logf("%v.EmptyCall(_, _) = _, %v", tc, err) - time.Sleep(10 * time.Millisecond) - } - // The client keeps reconnecting and ongoing fail-fast RPCs should fail with code.Unavailable. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { - t.Fatalf("TestService/EmptyCall(_, _, _) = _, %v, want _, error code: %s", err, codes.Unavailable) - } - if _, err := tc.StreamingInputCall(ctx); status.Code(err) != codes.Unavailable { - t.Fatalf("TestService/StreamingInputCall(_) = _, %v, want _, error code: %s", err, codes.Unavailable) - } - - awaitNewConnLogOutput() -} - -func testServiceConfigSetup(t *testing.T, e env) *test { - te := newTest(t, e) - te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - "Failed to dial : context canceled; please retry.", - ) - return te -} - -func newBool(b bool) (a *bool) { - return &b -} - -func newInt(b int) (a *int) { - return &b -} - -func newDuration(b time.Duration) (a *time.Duration) { - a = new(time.Duration) - *a = b - return -} - -func (s) TestGetMethodConfig(t *testing.T) { - te := testServiceConfigSetup(t, tcpClearRREnv) - defer te.tearDown() - r := manual.NewBuilderWithScheme("whatever") - - te.resolverScheme = r.Scheme() - cc := te.clientConn(grpc.WithResolvers(r)) - addrs := 
[]resolver.Address{{Addr: te.srvAddr}} - r.UpdateState(resolver.State{ - Addresses: addrs, - ServiceConfig: parseCfg(r, `{ - "methodConfig": [ - { - "name": [ - { - "service": "grpc.testing.TestService", - "method": "EmptyCall" - } - ], - "waitForReady": true, - "timeout": ".001s" - }, - { - "name": [ - { - "service": "grpc.testing.TestService" - } - ], - "waitForReady": false - } - ] -}`)}) - - tc := testpb.NewTestServiceClient(cc) - - // Make sure service config has been processed by grpc. - for { - if cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { - break - } - time.Sleep(time.Millisecond) + time.Sleep(time.Millisecond) } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -1494,7 +1166,7 @@ func (s) TestGetMethodConfig(t *testing.T) { t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) } - r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseCfg(r, `{ + r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1541,7 +1213,7 @@ func (s) TestServiceConfigWaitForReady(t *testing.T) { addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1560,7 +1232,7 @@ func (s) TestServiceConfigWaitForReady(t *testing.T) { ] }`)}) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // Make sure service config has been processed by grpc. for { @@ -1584,7 +1256,7 @@ func (s) TestServiceConfigWaitForReady(t *testing.T) { // Case2:Client API set failfast to be false, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds. 
r.UpdateState(resolver.State{ Addresses: addrs, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1630,7 +1302,7 @@ func (s) TestServiceConfigTimeout(t *testing.T) { addrs := []resolver.Address{{Addr: te.srvAddr}} r.UpdateState(resolver.State{ Addresses: addrs, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1649,7 +1321,7 @@ func (s) TestServiceConfigTimeout(t *testing.T) { ] }`)}) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // Make sure service config has been processed by grpc. for { @@ -1677,7 +1349,7 @@ func (s) TestServiceConfigTimeout(t *testing.T) { // Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. r.UpdateState(resolver.State{ Addresses: addrs, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1744,12 +1416,11 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { defer te1.tearDown() te1.resolverScheme = r.Scheme() - te1.nonBlockingDial = true te1.startServer(&testServer{security: e.security}) cc1 := te1.clientConn(grpc.WithResolvers(r)) addrs := []resolver.Address{{Addr: te1.srvAddr}} - sc := parseCfg(r, `{ + sc := parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -1768,7 +1439,7 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { ] }`) r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: sc}) - tc := testpb.NewTestServiceClient(cc1) + tc := testgrpc.NewTestServiceClient(cc1) req := &testpb.SimpleRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, @@ -1832,7 +1503,6 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { // Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). 
te2 := testServiceConfigSetup(t, e) te2.resolverScheme = r.Scheme() - te2.nonBlockingDial = true te2.maxClientReceiveMsgSize = newInt(1024) te2.maxClientSendMsgSize = newInt(1024) @@ -1840,7 +1510,7 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { defer te2.tearDown() cc2 := te2.clientConn(grpc.WithResolvers(r)) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te2.srvAddr}}, ServiceConfig: sc}) - tc = testpb.NewTestServiceClient(cc2) + tc = testgrpc.NewTestServiceClient(cc2) for { if cc2.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil { @@ -1892,7 +1562,6 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { // Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). te3 := testServiceConfigSetup(t, e) te3.resolverScheme = r.Scheme() - te3.nonBlockingDial = true te3.maxClientReceiveMsgSize = newInt(4096) te3.maxClientSendMsgSize = newInt(4096) @@ -1901,7 +1570,7 @@ func (s) TestServiceConfigMaxMsgSize(t *testing.T) { cc3 := te3.clientConn(grpc.WithResolvers(r)) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: te3.srvAddr}}, ServiceConfig: sc}) - tc = testpb.NewTestServiceClient(cc3) + tc = testgrpc.NewTestServiceClient(cc3) for { if cc3.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall").MaxReqSize != nil { @@ -1985,13 +1654,12 @@ func (s) TestStreamingRPCWithTimeoutInServiceConfigRecv(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") te.resolverScheme = r.Scheme() - te.nonBlockingDial = true cc := te.clientConn(grpc.WithResolvers(r)) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: te.srvAddr}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -2057,15 +1725,12 @@ func testPreloaderClientSend(t *testing.T, e env) { te := newTest(t, e) 
te.userAgent = testAppUA te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) // Test for streaming RPC recv. // Set context for send with proper RPC Information @@ -2121,6 +1786,61 @@ func testPreloaderClientSend(t *testing.T, e env) { } } +func (s) TestPreloaderSenderSend(t *testing.T) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + for i := 0; i < 10; i++ { + preparedMsg := &grpc.PreparedMsg{} + err := preparedMsg.Encode(stream, &testpb.StreamingOutputCallResponse{ + Payload: &testpb.Payload{ + Body: []byte{'0' + uint8(i)}, + }, + }) + if err != nil { + return err + } + stream.SendMsg(preparedMsg) + } + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + var ngot int + var buf bytes.Buffer + for { + reply, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + ngot++ + if buf.Len() > 0 { + buf.WriteByte(',') + } + buf.Write(reply.GetPayload().GetBody()) + } + if want := 10; ngot != want { + t.Errorf("Got %d replies, want %d", ngot, want) + } + if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want { + t.Errorf("Got replies %q; want %q", got, want) + } +} + func (s) 
TestMaxMsgSizeClientDefault(t *testing.T) { for _, e := range listTestEnv() { testMaxMsgSizeClientDefault(t, e) @@ -2131,15 +1851,12 @@ func testMaxMsgSizeClientDefault(t *testing.T, e env) { te := newTest(t, e) te.userAgent = testAppUA te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const smallSize = 1 const largeSize = 4 * 1024 * 1024 @@ -2198,15 +1915,12 @@ func testMaxMsgSizeClientAPI(t *testing.T, e env) { te.maxClientReceiveMsgSize = newInt(1024) te.maxClientSendMsgSize = newInt(1024) te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; please retry.", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const smallSize = 1 const largeSize = 1024 @@ -2286,15 +2000,12 @@ func testMaxMsgSizeServerAPI(t *testing.T, e env) { te.maxServerReceiveMsgSize = newInt(1024) te.maxServerSendMsgSize = newInt(1024) te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", "Failed to dial : context canceled; 
please retry.", ) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const smallSize = 1 const largeSize = 1024 @@ -2380,10 +2091,13 @@ type myTap struct { func (t *myTap) handle(ctx context.Context, info *tap.Info) (context.Context, error) { if info != nil { - if info.FullMethodName == "/grpc.testing.TestService/EmptyCall" { + switch info.FullMethodName { + case "/grpc.testing.TestService/EmptyCall": t.cnt++ - } else if info.FullMethodName == "/grpc.testing.TestService/UnaryCall" { + case "/grpc.testing.TestService/UnaryCall": return nil, fmt.Errorf("tap error") + case "/grpc.testing.TestService/FullDuplexCall": + return nil, status.Errorf(codes.FailedPrecondition, "test custom error") } } return ctx, nil @@ -2394,16 +2108,11 @@ func testTap(t *testing.T, e env) { te.userAgent = testAppUA ttap := &myTap{} te.tapHandle = ttap.handle - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - ) te.startServer(&testServer{security: e.security}) defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -2423,385 +2132,82 @@ func testTap(t *testing.T, e env) { ResponseSize: 45, Payload: payload, } - if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.Unavailable { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) - } -} - -// healthCheck is a helper function to make a unary health check RPC and return -// the response. 
-func healthCheck(d time.Duration, cc *grpc.ClientConn, service string) (*healthpb.HealthCheckResponse, error) { - ctx, cancel := context.WithTimeout(context.Background(), d) - defer cancel() - hc := healthgrpc.NewHealthClient(cc) - return hc.Check(ctx, &healthpb.HealthCheckRequest{Service: service}) -} - -// verifyHealthCheckStatus is a helper function to verify that the current -// health status of the service matches the one passed in 'wantStatus'. -func verifyHealthCheckStatus(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantStatus healthpb.HealthCheckResponse_ServingStatus) { - t.Helper() - resp, err := healthCheck(d, cc, service) - if err != nil { - t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) - } - if resp.Status != wantStatus { - t.Fatalf("Got the serving status %v, want %v", resp.Status, wantStatus) - } -} - -// verifyHealthCheckErrCode is a helper function to verify that a unary health -// check RPC returns an error with a code set to 'wantCode'. -func verifyHealthCheckErrCode(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantCode codes.Code) { - t.Helper() - if _, err := healthCheck(d, cc, service); status.Code(err) != wantCode { - t.Fatalf("Health/Check() got errCode %v, want %v", status.Code(err), wantCode) - } -} - -// newHealthCheckStream is a helper function to start a health check streaming -// RPC, and returns the stream. 
-func newHealthCheckStream(t *testing.T, cc *grpc.ClientConn, service string) (healthgrpc.Health_WatchClient, context.CancelFunc) { - t.Helper() - ctx, cancel := context.WithCancel(context.Background()) - hc := healthgrpc.NewHealthClient(cc) - stream, err := hc.Watch(ctx, &healthpb.HealthCheckRequest{Service: service}) - if err != nil { - t.Fatalf("hc.Watch(_, %v) failed: %v", service, err) + if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.PermissionDenied { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, %s", err, codes.PermissionDenied) } - return stream, cancel -} - -// healthWatchChecker is a helper function to verify that the next health -// status returned on the given stream matches the one passed in 'wantStatus'. -func healthWatchChecker(t *testing.T, stream healthgrpc.Health_WatchClient, wantStatus healthpb.HealthCheckResponse_ServingStatus) { - t.Helper() - response, err := stream.Recv() + str, err := tc.FullDuplexCall(ctx) if err != nil { - t.Fatalf("stream.Recv() failed: %v", err) + t.Fatalf("Unexpected error creating stream: %v", err) } - if response.Status != wantStatus { - t.Fatalf("got servingStatus %v, want %v", response.Status, wantStatus) + if _, err := str.Recv(); status.Code(err) != codes.FailedPrecondition { + t.Fatalf("FullDuplexCall Recv() = _, %v, want _, %s", err, codes.FailedPrecondition) } } -// TestHealthCheckSuccess invokes the unary Check() RPC on the health server in -// a successful case. 
-func (s) TestHealthCheckSuccess(t *testing.T) { +func (s) TestEmptyUnaryWithUserAgent(t *testing.T) { for _, e := range listTestEnv() { - testHealthCheckSuccess(t, e) + testEmptyUnaryWithUserAgent(t, e) } } -func testHealthCheckSuccess(t *testing.T, e env) { +func testEmptyUnaryWithUserAgent(t *testing.T, e env) { te := newTest(t, e) - te.enableHealthServer = true + te.userAgent = testAppUA te.startServer(&testServer{security: e.security}) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) defer te.tearDown() - verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.OK) -} - -// TestHealthCheckFailure invokes the unary Check() RPC on the health server -// with an expired context and expects the RPC to fail. -func (s) TestHealthCheckFailure(t *testing.T) { - for _, e := range listTestEnv() { - testHealthCheckFailure(t, e) + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + var header metadata.MD + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + reply, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Header(&header)) + if err != nil || !proto.Equal(&testpb.Empty{}, reply) { + t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, ", reply, err, &testpb.Empty{}) + } + if v, ok := header["ua"]; !ok || !strings.HasPrefix(v[0], testAppUA) { + t.Fatalf("header[\"ua\"] = %q, %t, want string with prefix %q, true", v, ok, testAppUA) } -} - -func testHealthCheckFailure(t *testing.T, e env) { - te := newTest(t, e) - te.declareLogNoise( - "Failed to dial ", - "grpc: the client connection is closing; please retry", - ) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - defer te.tearDown() - verifyHealthCheckErrCode(t, 0*time.Second, te.clientConn(), defaultHealthService, codes.DeadlineExceeded) - awaitNewConnLogOutput() + 
te.srv.Stop() } -// TestHealthCheckOff makes a unary Check() RPC on the health server where the -// health status of the defaultHealthService is not set, and therefore expects -// an error code 'codes.NotFound'. -func (s) TestHealthCheckOff(t *testing.T) { +func (s) TestFailedEmptyUnary(t *testing.T) { for _, e := range listTestEnv() { - // TODO(bradfitz): Temporarily skip this env due to #619. if e.name == "handler-tls" { + // This test covers status details, but + // Grpc-Status-Details-Bin is not support in handler_server. continue } - testHealthCheckOff(t, e) + testFailedEmptyUnary(t, e) } } -func testHealthCheckOff(t *testing.T, e env) { +func testFailedEmptyUnary(t *testing.T, e env) { te := newTest(t, e) - te.enableHealthServer = true + te.userAgent = failAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() + tc := testgrpc.NewTestServiceClient(te.clientConn()) - verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.NotFound) + ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) + wantErr := detailedError + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr) + } } -// TestHealthWatchMultipleClients makes a streaming Watch() RPC on the health -// server with multiple clients and expects the same status on both streams. 
-func (s) TestHealthWatchMultipleClients(t *testing.T) { +func (s) TestLargeUnary(t *testing.T) { for _, e := range listTestEnv() { - testHealthWatchMultipleClients(t, e) + testLargeUnary(t, e) } } -func testHealthWatchMultipleClients(t *testing.T, e env) { +func testLargeUnary(t *testing.T, e env) { te := newTest(t, e) - te.enableHealthServer = true te.startServer(&testServer{security: e.security}) defer te.tearDown() - - cc := te.clientConn() - stream1, cf1 := newHealthCheckStream(t, cc, defaultHealthService) - defer cf1() - healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) - - stream2, cf2 := newHealthCheckStream(t, cc, defaultHealthService) - defer cf2() - healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) - - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) - healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_NOT_SERVING) - healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING) -} - -// TestHealthWatchSameStatusmakes a streaming Watch() RPC on the health server -// and makes sure that the health status of the server is as expected after -// multiple calls to SetServingStatus with the same status. 
-func (s) TestHealthWatchSameStatus(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchSameStatus(t, e) - } -} - -func testHealthWatchSameStatus(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) - defer cf() - - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING) -} - -// TestHealthWatchServiceStatusSetBeforeStartingServer starts a health server -// on which the health status for the defaultService is set before the gRPC -// server is started, and expects the correct health status to be returned. 
-func (s) TestHealthWatchServiceStatusSetBeforeStartingServer(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchSetServiceStatusBeforeStartingServer(t, e) - } -} - -func testHealthWatchSetServiceStatusBeforeStartingServer(t *testing.T, e env) { - hs := health.NewServer() - te := newTest(t, e) - te.healthServer = hs - hs.SetServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) - defer cf() - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) -} - -// TestHealthWatchDefaultStatusChange verifies the simple case where the -// service starts off with a SERVICE_UNKNOWN status (because SetServingStatus -// hasn't been called yet) and then moves to SERVING after SetServingStatus is -// called. -func (s) TestHealthWatchDefaultStatusChange(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchDefaultStatusChange(t, e) - } -} - -func testHealthWatchDefaultStatusChange(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) - defer cf() - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) -} - -// TestHealthWatchSetServiceStatusBeforeClientCallsWatch verifies the case -// where the health status is set to SERVING before the client calls Watch(). 
-func (s) TestHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchSetServiceStatusBeforeClientCallsWatch(t, e) - } -} - -func testHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - defer te.tearDown() - - stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) - defer cf() - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) -} - -// TestHealthWatchOverallServerHealthChange verifies setting the overall status -// of the server by using the empty service name. -func (s) TestHealthWatchOverallServerHealthChange(t *testing.T) { - for _, e := range listTestEnv() { - testHealthWatchOverallServerHealthChange(t, e) - } -} - -func testHealthWatchOverallServerHealthChange(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - stream, cf := newHealthCheckStream(t, te.clientConn(), "") - defer cf() - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) - te.setHealthServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING) - healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING) -} - -// TestUnknownHandler verifies that an expected error is returned (by setting -// the unknownHandler on the server) for a service which is not exposed to the -// client. -func (s) TestUnknownHandler(t *testing.T) { - // An example unknownHandler that returns a different code and a different - // method, making sure that we do not expose what methods are implemented to - // a client that is not authenticated. 
- unknownHandler := func(srv interface{}, stream grpc.ServerStream) error { - return status.Error(codes.Unauthenticated, "user unauthenticated") - } - for _, e := range listTestEnv() { - // TODO(bradfitz): Temporarily skip this env due to #619. - if e.name == "handler-tls" { - continue - } - testUnknownHandler(t, e, unknownHandler) - } -} - -func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) { - te := newTest(t, e) - te.unknownHandler = unknownHandler - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), "", codes.Unauthenticated) -} - -// TestHealthCheckServingStatus makes a streaming Watch() RPC on the health -// server and verifies a bunch of health status transitions. -func (s) TestHealthCheckServingStatus(t *testing.T) { - for _, e := range listTestEnv() { - testHealthCheckServingStatus(t, e) - } -} - -func testHealthCheckServingStatus(t *testing.T, e env) { - te := newTest(t, e) - te.enableHealthServer = true - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - verifyHealthCheckStatus(t, 1*time.Second, cc, "", healthpb.HealthCheckResponse_SERVING) - verifyHealthCheckErrCode(t, 1*time.Second, cc, defaultHealthService, codes.NotFound) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) - verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_SERVING) - te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) - verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) -} - -func (s) TestEmptyUnaryWithUserAgent(t *testing.T) { - for _, e := range listTestEnv() { - testEmptyUnaryWithUserAgent(t, e) - } -} - -func testEmptyUnaryWithUserAgent(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - 
te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - var header metadata.MD - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - reply, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.Header(&header)) - if err != nil || !proto.Equal(&testpb.Empty{}, reply) { - t.Fatalf("TestService/EmptyCall(_, _) = %v, %v, want %v, ", reply, err, &testpb.Empty{}) - } - if v, ok := header["ua"]; !ok || !strings.HasPrefix(v[0], testAppUA) { - t.Fatalf("header[\"ua\"] = %q, %t, want string with prefix %q, true", v, ok, testAppUA) - } - - te.srv.Stop() -} - -func (s) TestFailedEmptyUnary(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - // This test covers status details, but - // Grpc-Status-Details-Bin is not support in handler_server. - continue - } - testFailedEmptyUnary(t, e) - } -} - -func testFailedEmptyUnary(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = failAppUA - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) - - ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) - wantErr := detailedError - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); !testutils.StatusErrEqual(err, wantErr) { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %v", err, wantErr) - } -} - -func (s) TestLargeUnary(t *testing.T) { - for _, e := range listTestEnv() { - testLargeUnary(t, e) - } -} - -func testLargeUnary(t *testing.T, e env) { - te := newTest(t, e) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const argSize = 271828 const respSize = 314159 @@ -2843,7 +2249,7 @@ func testExceedMsgLimit(t *testing.T, e env) { te.maxServerMsgSize, te.maxClientMsgSize = newInt(maxMsgSize), 
newInt(maxMsgSize) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) largeSize := int32(maxMsgSize + 1) const smallSize = 1 @@ -2925,7 +2331,7 @@ func testPeerClientSide(t *testing.T, e env) { te.userAgent = testAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) peer := new(peer.Peer) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -2967,7 +2373,7 @@ func testPeerNegative(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) peer := new(peer.Peer) ctx, cancel := context.WithCancel(context.Background()) cancel() @@ -2986,7 +2392,7 @@ func testPeerFailedRPC(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -3041,7 +2447,7 @@ func testMetadataUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const argSize = 2718 const respSize = 314 @@ -3067,6 +2473,7 @@ func testMetadataUnaryRPC(t *testing.T, e env) { delete(header, "date") // the Date header is also optional delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") } if !reflect.DeepEqual(header, testMetadata) { t.Fatalf("Received header metadata %v, want %v", header, testMetadata) @@ -3086,7 +2493,7 @@ func testMetadataOrderUnaryRPC(t *testing.T, e env) { te := 
newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) ctx = metadata.AppendToOutgoingContext(ctx, "key1", "value2") @@ -3106,6 +2513,7 @@ func testMetadataOrderUnaryRPC(t *testing.T, e env) { delete(header, "date") // the Date header is also optional delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") } if !reflect.DeepEqual(header, newMetadata) { @@ -3123,7 +2531,7 @@ func testMultipleSetTrailerUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, multipleSetTrailer: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -3160,7 +2568,7 @@ func testMultipleSetTrailerStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, multipleSetTrailer: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) @@ -3195,7 +2603,7 @@ func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setAndSendHeader: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -3218,6 +2626,8 @@ func testSetAndSendHeaderUnaryRPC(t *testing.T, e env) { } delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") + expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { 
t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3238,7 +2648,7 @@ func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -3262,6 +2672,7 @@ func testMultipleSetHeaderUnaryRPC(t *testing.T, e env) { } delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3282,7 +2693,7 @@ func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -3305,6 +2716,7 @@ func testMultipleSetHeaderUnaryRPCError(t *testing.T, e env) { } delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3325,7 +2737,7 @@ func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setAndSendHeader: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(context.Background(), testMetadata) stream, err := tc.FullDuplexCall(ctx) @@ -3345,6 +2757,7 @@ func testSetAndSendHeaderStreamingRPC(t *testing.T, e env) { } delete(header, "user-agent") delete(header, 
"content-type") + delete(header, "grpc-accept-encoding") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3365,7 +2778,7 @@ func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -3408,6 +2821,7 @@ func testMultipleSetHeaderStreamingRPC(t *testing.T, e env) { } delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3429,7 +2843,7 @@ func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) const ( argSize = 1 @@ -3468,6 +2882,7 @@ func testMultipleSetHeaderStreamingRPCError(t *testing.T, e env) { } delete(header, "user-agent") delete(header, "content-type") + delete(header, "grpc-accept-encoding") expectedHeader := metadata.Join(testMetadata, testMetadata2) if !reflect.DeepEqual(header, expectedHeader) { t.Fatalf("Received header metadata %v, want %v", header, expectedHeader) @@ -3494,7 +2909,7 @@ func testMalformedHTTP2Metadata(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 2718) if err != nil { @@ 
-3512,66 +2927,79 @@ func testMalformedHTTP2Metadata(t *testing.T, e env) { } } +// Tests that the client transparently retries correctly when receiving a +// RST_STREAM with code REFUSED_STREAM. func (s) TestTransparentRetry(t *testing.T) { - for _, e := range listTestEnv() { - if e.name == "handler-tls" { - // Fails with RST_STREAM / FLOW_CONTROL_ERROR - continue - } - testTransparentRetry(t, e) - } -} - -// This test makes sure RPCs are retried times when they receive a RST_STREAM -// with the REFUSED_STREAM error code, which the InTapHandle provokes. -func testTransparentRetry(t *testing.T, e env) { - te := newTest(t, e) - attempts := 0 - successAttempt := 2 - te.tapHandle = func(ctx context.Context, _ *tap.Info) (context.Context, error) { - attempts++ - if attempts < successAttempt { - return nil, errors.New("not now") - } - return ctx, nil - } - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - cc := te.clientConn() - tsc := testpb.NewTestServiceClient(cc) testCases := []struct { - successAttempt int - failFast bool - errCode codes.Code + failFast bool + errCode codes.Code }{{ - successAttempt: 1, + // success attempt: 1, (stream ID 1) }, { - successAttempt: 2, + // success attempt: 2, (stream IDs 3, 5) }, { - successAttempt: 3, - errCode: codes.Unavailable, + // no success attempt (stream IDs 7, 9) + errCode: codes.Unavailable, }, { - successAttempt: 1, - failFast: true, + // success attempt: 1 (stream ID 11), + failFast: true, }, { - successAttempt: 2, - failFast: true, + // success attempt: 2 (stream IDs 13, 15), + failFast: true, }, { - successAttempt: 3, - failFast: true, - errCode: codes.Unavailable, + // no success attempt (stream IDs 17, 19) + failFast: true, + errCode: codes.Unavailable, }} - for _, tc := range testCases { - attempts = 0 - successAttempt = tc.successAttempt - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - _, err := tsc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(!tc.failFast)) 
- cancel() - if status.Code(err) != tc.errCode { - t.Errorf("%+v: tsc.EmptyCall(_, _) = _, %v, want _, Code=%v", tc, err, tc.errCode) + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen. Err: %v", err) + } + defer lis.Close() + server := &httpServer{ + responses: []httpServerResponse{{ + trailers: [][]string{{ + ":status", "200", + "content-type", "application/grpc", + "grpc-status", "0", + }}, + }}, + refuseStream: func(i uint32) bool { + switch i { + case 1, 5, 11, 15: // these stream IDs succeed + return false + } + return true // these are refused + }, + } + server.start(t, lis) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("failed to dial due to err: %v", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + client := testgrpc.NewTestServiceClient(cc) + + for i, tc := range testCases { + stream, err := client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("error creating stream due to err: %v", err) + } + code := func(err error) codes.Code { + if err == io.EOF { + return codes.OK + } + return status.Code(err) } + if _, err := stream.Recv(); code(err) != tc.errCode { + t.Fatalf("%v: stream.Recv() = _, %v, want error code: %v", i, err, tc.errCode) + } + } } @@ -3588,7 +3016,7 @@ func testCancel(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) const argSize = 2718 const respSize = 314 @@ -3625,7 +3053,7 @@ func testCancelNoIO(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // Start one blocked RPC for which we'll never send streaming // input. 
This will consume the 1 maximum concurrent streams, @@ -3691,7 +3119,7 @@ func testNoService(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) stream, err := tc.FullDuplexCall(te.ctx, grpc.WaitForReady(true)) if err != nil { @@ -3712,7 +3140,7 @@ func testPingPong(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) stream, err := tc.FullDuplexCall(te.ctx) if err != nil { @@ -3771,7 +3199,7 @@ func testMetadataStreamingRPC(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx := metadata.NewOutgoingContext(te.ctx, testMetadata) stream, err := tc.FullDuplexCall(ctx) @@ -3786,6 +3214,7 @@ func testMetadataStreamingRPC(t *testing.T, e env) { delete(headerMD, "trailer") // ignore if present delete(headerMD, "user-agent") delete(headerMD, "content-type") + delete(headerMD, "grpc-accept-encoding") if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { t.Errorf("#1 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) } @@ -3794,6 +3223,7 @@ func testMetadataStreamingRPC(t *testing.T, e env) { delete(headerMD, "trailer") // ignore if present delete(headerMD, "user-agent") delete(headerMD, "content-type") + delete(headerMD, "grpc-accept-encoding") if err != nil || !reflect.DeepEqual(testMetadata, headerMD) { t.Errorf("#2 %v.Header() = %v, %v, want %v, ", stream, headerMD, err, testMetadata) } @@ -3848,7 +3278,7 @@ func testServerStreaming(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := 
testgrpc.NewTestServiceClient(te.clientConn()) respParam := make([]*testpb.ResponseParameters, len(respSizes)) for i, s := range respSizes { @@ -3906,7 +3336,7 @@ func testFailedServerStreaming(t *testing.T, e env) { te.userAgent = failAppUA te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) respParam := make([]*testpb.ResponseParameters, len(respSizes)) for i, s := range respSizes { @@ -3940,10 +3370,10 @@ func equalError(x, y error) bool { // // All other TestServiceServer methods crash if called. type concurrentSendServer struct { - testpb.TestServiceServer + testgrpc.TestServiceServer } -func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testpb.TestService_StreamingOutputCallServer) error { +func (s concurrentSendServer) StreamingOutputCall(args *testpb.StreamingOutputCallRequest, stream testgrpc.TestService_StreamingOutputCallServer) error { for i := 0; i < 10; i++ { stream.Send(&testpb.StreamingOutputCallResponse{ Payload: &testpb.Payload{ @@ -3967,7 +3397,7 @@ func testServerStreamingConcurrent(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) doStreamingCall := func() { req := &testpb.StreamingOutputCallRequest{} @@ -4048,7 +3478,7 @@ func testClientStreaming(t *testing.T, e env, sizes []int) { te := newTest(t, e) te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(te.ctx, time.Second*30) defer cancel() @@ -4094,7 +3524,7 @@ func testClientStreamingError(t *testing.T, e env) { te := newTest(t, e) te.startServer(&testServer{security: e.security, earlyFail: true}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) 
+ tc := testgrpc.NewTestServiceClient(te.clientConn()) stream, err := tc.StreamingInputCall(te.ctx) if err != nil { @@ -4141,7 +3571,7 @@ func testExceedMaxStreamsLimit(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) _, err := tc.StreamingInputCall(te.ctx) if err != nil { @@ -4181,7 +3611,7 @@ func testStreamsQuotaRecovery(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithCancel(context.Background()) defer cancel() if _, err := tc.StreamingInputCall(ctx); err != nil { @@ -4236,225 +3666,66 @@ func testStreamsQuotaRecovery(t *testing.T, e env) { } } -func (s) TestCompressServerHasNoSupport(t *testing.T) { +func (s) TestUnaryClientInterceptor(t *testing.T) { for _, e := range listTestEnv() { - testCompressServerHasNoSupport(t, e) + testUnaryClientInterceptor(t, e) + } +} + +func failOkayRPC(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + err := invoker(ctx, method, req, reply, cc, opts...) 
+ if err == nil { + return status.Error(codes.NotFound, "") } + return err } -func testCompressServerHasNoSupport(t *testing.T, e env) { +func testUnaryClientInterceptor(t *testing.T, e env) { te := newTest(t, e) - te.serverCompression = false - te.clientCompression = false - te.clientNopCompression = true + te.userAgent = testAppUA + te.unaryClientInt = failOkayRPC te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) - - const argSize = 271828 - const respSize = 314159 - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) - if err != nil { - t.Fatal(err) - } - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseSize: respSize, - Payload: payload, - } + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.Unimplemented { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code %s", err, codes.Unimplemented) - } - // Streaming RPC - stream, err := tc.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if _, err := stream.Recv(); err == nil || status.Code(err) != codes.Unimplemented { - t.Fatalf("%v.Recv() = %v, want error code %s", stream, err, codes.Unimplemented) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.NotFound { + t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.NotFound) } } -func (s) TestCompressOK(t *testing.T) { +func (s) TestStreamClientInterceptor(t *testing.T) { for _, e := range listTestEnv() { - testCompressOK(t, e) + testStreamClientInterceptor(t, e) + } +} + +func failOkayStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + s, 
err := streamer(ctx, desc, cc, method, opts...) + if err == nil { + return nil, status.Error(codes.NotFound, "") } + return s, nil } -func testCompressOK(t *testing.T, e env) { +func testStreamClientInterceptor(t *testing.T, e env) { te := newTest(t, e) - te.serverCompression = true - te.clientCompression = true + te.streamClientInt = failOkayStream te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) - // Unary call - const argSize = 271828 - const respSize = 314159 - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) - if err != nil { - t.Fatal(err) - } - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseSize: respSize, - Payload: payload, - } - ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) - if _, err := tc.UnaryCall(ctx, req); err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) - } - // Streaming RPC - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, err := tc.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } + tc := testgrpc.NewTestServiceClient(te.clientConn()) respParam := []*testpb.ResponseParameters{ { - Size: 31415, + Size: int32(1), }, } - payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1)) if err != nil { t.Fatal(err) } - sreq := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: payload, - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - stream.CloseSend() - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = %v, want ", stream, err) - } - if _, err := stream.Recv(); err != io.EOF { - t.Fatalf("%v.Recv() = %v, want io.EOF", 
stream, err) - } -} - -func (s) TestIdentityEncoding(t *testing.T) { - for _, e := range listTestEnv() { - testIdentityEncoding(t, e) - } -} - -func testIdentityEncoding(t *testing.T, e env) { - te := newTest(t, e) - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) - - // Unary call - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 5) - if err != nil { - t.Fatal(err) - } - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseSize: 10, - Payload: payload, - } - ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) - if _, err := tc.UnaryCall(ctx, req); err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) - } - // Streaming RPC - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, err := tc.FullDuplexCall(ctx, grpc.UseCompressor("identity")) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) - if err != nil { - t.Fatal(err) - } - sreq := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: []*testpb.ResponseParameters{{Size: 10}}, - Payload: payload, - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - stream.CloseSend() - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = %v, want ", stream, err) - } - if _, err := stream.Recv(); err != io.EOF { - t.Fatalf("%v.Recv() = %v, want io.EOF", stream, err) - } -} - -func (s) TestUnaryClientInterceptor(t *testing.T) { - for _, e := range listTestEnv() { - testUnaryClientInterceptor(t, e) - } -} - -func failOkayRPC(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - err := invoker(ctx, 
method, req, reply, cc, opts...) - if err == nil { - return status.Error(codes.NotFound, "") - } - return err -} - -func testUnaryClientInterceptor(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - te.unaryClientInt = failOkayRPC - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - tc := testpb.NewTestServiceClient(te.clientConn()) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.NotFound { - t.Fatalf("%v.EmptyCall(_, _) = _, %v, want _, error code %s", tc, err, codes.NotFound) - } -} - -func (s) TestStreamClientInterceptor(t *testing.T) { - for _, e := range listTestEnv() { - testStreamClientInterceptor(t, e) - } -} - -func failOkayStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - s, err := streamer(ctx, desc, cc, method, opts...) 
- if err == nil { - return nil, status.Error(codes.NotFound, "") - } - return s, nil -} - -func testStreamClientInterceptor(t *testing.T, e env) { - te := newTest(t, e) - te.streamClientInt = failOkayStream - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - - tc := testpb.NewTestServiceClient(te.clientConn()) - respParam := []*testpb.ResponseParameters{ - { - Size: int32(1), - }, - } - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1)) - if err != nil { - t.Fatal(err) - } - req := &testpb.StreamingOutputCallRequest{ + req := &testpb.StreamingOutputCallRequest{ ResponseType: testpb.PayloadType_COMPRESSABLE, ResponseParameters: respParam, Payload: payload, @@ -4482,7 +3753,7 @@ func testUnaryServerInterceptor(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.PermissionDenied { @@ -4514,7 +3785,7 @@ func testStreamServerInterceptor(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) respParam := []*testpb.ResponseParameters{ { Size: int32(1), @@ -4555,21 +3826,21 @@ func testStreamServerInterceptor(t *testing.T, e env) { // Any unimplemented method will crash. Tests implement the method(s) // they need. 
type funcServer struct { - testpb.TestServiceServer + testgrpc.TestServiceServer unaryCall func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) - streamingInputCall func(stream testpb.TestService_StreamingInputCallServer) error - fullDuplexCall func(stream testpb.TestService_FullDuplexCallServer) error + streamingInputCall func(stream testgrpc.TestService_StreamingInputCallServer) error + fullDuplexCall func(stream testgrpc.TestService_FullDuplexCallServer) error } func (s *funcServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return s.unaryCall(ctx, in) } -func (s *funcServer) StreamingInputCall(stream testpb.TestService_StreamingInputCallServer) error { +func (s *funcServer) StreamingInputCall(stream testgrpc.TestService_StreamingInputCallServer) error { return s.streamingInputCall(stream) } -func (s *funcServer) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (s *funcServer) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { return s.fullDuplexCall(stream) } @@ -4665,7 +3936,7 @@ func (s) TestClientRequestBodyErrorCancelStreamingInput(t *testing.T) { func testClientRequestBodyErrorCancelStreamingInput(t *testing.T, e env) { te := newTest(t, e) recvErr := make(chan error, 1) - ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { + ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error { _, err := stream.Recv() recvErr <- err return nil @@ -4707,7 +3978,7 @@ func testClientInitialHeaderEndStream(t *testing.T, e env) { // checking. 
handlerDone := make(chan struct{}) te := newTest(t, e) - ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { + ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error { defer close(handlerDone) // Block on serverTester receiving RST_STREAM. This ensures server has closed // stream before stream.Recv(). @@ -4751,7 +4022,7 @@ func testClientSendDataAfterCloseSend(t *testing.T, e env) { // checking. handlerDone := make(chan struct{}) te := newTest(t, e) - ts := &funcServer{streamingInputCall: func(stream testpb.TestService_StreamingInputCallServer) error { + ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error { defer close(handlerDone) // Block on serverTester receiving RST_STREAM. This ensures server has closed // stream before stream.Recv(). @@ -4770,8 +4041,8 @@ func testClientSendDataAfterCloseSend(t *testing.T, e env) { } if err := stream.SendMsg(nil); err == nil { t.Error("expected error sending message on stream after stream closed due to illegal data") - } else if status.Code(err) != codes.Internal { - t.Errorf("expected internal error, instead received '%v'", err) + } else if status.Code(err) != codes.Canceled { + t.Errorf("expected cancel error, instead received '%v'", err) } return nil }} @@ -4803,7 +4074,7 @@ func (s) TestClientResourceExhaustedCancelFullDuplex(t *testing.T) { func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) { te := newTest(t, e) recvErr := make(chan error, 1) - ts := &funcServer{fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { + ts := &funcServer{fullDuplexCall: func(stream testgrpc.TestService_FullDuplexCallServer) error { defer close(recvErr) _, err := stream.Recv() if err != nil { @@ -4841,7 +4112,7 @@ func testClientResourceExhaustedCancelFullDuplex(t *testing.T, e env) { // client side when server send a large message. 
te.maxClientReceiveMsgSize = newInt(10) cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() @@ -4895,7 +4166,7 @@ func (s) TestFailfastRPCFailOnFatalHandshakeError(t *testing.T) { } defer cc.Close() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) // This unary call should fail, but not timeout. ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -4929,7 +4200,7 @@ func (s) TestFlowControlLogicalRace(t *testing.T) { defer lis.Close() s := grpc.NewServer() - testpb.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{ + testgrpc.RegisterTestServiceServer(s, &flowControlLogicalRaceServer{ itemCount: itemCount, itemSize: itemSize, }) @@ -4937,17 +4208,16 @@ func (s) TestFlowControlLogicalRace(t *testing.T) { go s.Serve(lis) - ctx := context.Background() - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock()) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("grpc.Dial(%q) = %v", lis.Addr().String(), err) } defer cc.Close() - cl := testpb.NewTestServiceClient(cc) + cl := testgrpc.NewTestServiceClient(cc) failures := 0 for i := 0; i < requestCount; i++ { - ctx, cancel := context.WithTimeout(ctx, requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) output, err := cl.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{}) if err != nil { t.Fatalf("StreamingOutputCall; err = %q", err) @@ -4985,13 +4255,13 @@ func (s) TestFlowControlLogicalRace(t *testing.T) { } type flowControlLogicalRaceServer struct { - testpb.TestServiceServer + testgrpc.TestServiceServer itemSize int itemCount int } -func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv 
testpb.TestService_StreamingOutputCallServer) error { +func (s *flowControlLogicalRaceServer) StreamingOutputCall(req *testpb.StreamingOutputCallRequest, srv testgrpc.TestService_StreamingOutputCallServer) error { for i := 0; i < s.itemCount; i++ { err := srv.Send(&testpb.StreamingOutputCallResponse{ Payload: &testpb.Payload{ @@ -5145,7 +4415,7 @@ func (s) TestGRPCMethod(t *testing.T) { } defer ss.Stop() - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -5157,6 +4427,86 @@ func (s) TestGRPCMethod(t *testing.T) { } } +// renameProtoCodec is an encoding.Codec wrapper that allows customizing the +// Name() of another codec. +type renameProtoCodec struct { + encoding.Codec + name string +} + +func (r *renameProtoCodec) Name() string { return r.name } + +// TestForceCodecName confirms that the ForceCodec call option sets the subtype +// in the content-type header according to the Name() of the codec provided. 
+func (s) TestForceCodecName(t *testing.T) { + wantContentTypeCh := make(chan []string, 1) + defer close(wantContentTypeCh) + + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Errorf(codes.Internal, "no metadata in context") + } + if got, want := md["content-type"], <-wantContentTypeCh; !reflect.DeepEqual(got, want) { + return nil, status.Errorf(codes.Internal, "got content-type=%q; want [%q]", got, want) + } + return &testpb.Empty{}, nil + }, + } + if err := ss.Start([]grpc.ServerOption{grpc.ForceServerCodec(encoding.GetCodec("proto"))}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + codec := &renameProtoCodec{Codec: encoding.GetCodec("proto"), name: "some-test-name"} + wantContentTypeCh <- []string{"application/grpc+some-test-name"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + // Confirm the name is converted to lowercase before transmitting. 
+ codec.name = "aNoTHeRNaME" + wantContentTypeCh <- []string{"application/grpc+anothername"} + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}, grpc.ForceCodec(codec)); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } +} + +func (s) TestForceServerCodec(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + codec := &countingProtoCodec{} + if err := ss.Start([]grpc.ServerOption{grpc.ForceServerCodec(codec)}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v; want _, nil", err) + } + + unmarshalCount := atomic.LoadInt32(&codec.unmarshalCount) + const wantUnmarshalCount = 1 + if unmarshalCount != wantUnmarshalCount { + t.Fatalf("protoCodec.unmarshalCount = %d; want %d", unmarshalCount, wantUnmarshalCount) + } + marshalCount := atomic.LoadInt32(&codec.marshalCount) + const wantMarshalCount = 1 + if marshalCount != wantMarshalCount { + t.Fatalf("protoCodec.marshalCount = %d; want %d", marshalCount, wantMarshalCount) + } +} + func (s) TestUnaryProxyDoesNotForwardMetadata(t *testing.T) { const mdkey = "somedata" @@ -5211,7 +4561,7 @@ func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { // doFDC performs a FullDuplexCall with client and returns the error from the // first stream.Recv call, or nil if that error is io.EOF. Calls t.Fatal if // the stream cannot be established. 
- doFDC := func(ctx context.Context, client testpb.TestServiceClient) error { + doFDC := func(ctx context.Context, client testgrpc.TestServiceClient) error { stream, err := client.FullDuplexCall(ctx) if err != nil { t.Fatalf("Unwanted error: %v", err) @@ -5224,7 +4574,7 @@ func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { // endpoint ensures mdkey is NOT in metadata and returns an error if it is. endpoint := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { ctx := stream.Context() if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] != nil { return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey) @@ -5240,7 +4590,7 @@ func (s) TestStreamingProxyDoesNotForwardMetadata(t *testing.T) { // proxy ensures mdkey IS in metadata, then forwards the RPC to endpoint // without explicitly copying the metadata. 
proxy := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { ctx := stream.Context() if md, ok := metadata.FromIncomingContext(ctx); !ok || md[mdkey] == nil { return status.Errorf(codes.Internal, "endpoint: md=%v; want !contains(%q)", md, mdkey) @@ -5362,7 +4712,7 @@ func (s) TestTapTimeout(t *testing.T) { func (s) TestClientWriteFailsAfterServerClosesStream(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { return status.Errorf(codes.Internal, "") }, } @@ -5437,7 +4787,7 @@ func testConfigurableWindowSize(t *testing.T, e env, wc windowSizeConfig) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() stream, err := tc.FullDuplexCall(ctx) @@ -5489,18 +4839,10 @@ func testWaitForReadyConnection(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() // Non-blocking dial. - tc := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - state := cc.GetState() - // Wait for connection to be Ready. - for ; state != connectivity.Ready && cc.WaitForStateChange(ctx, state); state = cc.GetState() { - } - if state != connectivity.Ready { - t.Fatalf("Want connection state to be Ready, got %v", state) - } - ctx, cancel = context.WithTimeout(context.Background(), time.Second) + tc := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) // Make a fail-fast RPC. 
if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("TestService/EmptyCall(_,_) = _, %v, want _, nil", err) @@ -5526,6 +4868,33 @@ func (c *errCodec) Name() string { return "Fermat's near-miss." } +type countingProtoCodec struct { + marshalCount int32 + unmarshalCount int32 +} + +func (p *countingProtoCodec) Marshal(v interface{}) ([]byte, error) { + atomic.AddInt32(&p.marshalCount, 1) + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + } + return proto.Marshal(vv) +} + +func (p *countingProtoCodec) Unmarshal(data []byte, v interface{}) error { + atomic.AddInt32(&p.unmarshalCount, 1) + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) + } + return proto.Unmarshal(data, vv) +} + +func (*countingProtoCodec) Name() string { + return "proto" +} + func (s) TestEncodeDoesntPanic(t *testing.T) { for _, e := range listTestEnv() { testEncodeDoesntPanic(t, e) @@ -5539,7 +4908,7 @@ func testEncodeDoesntPanic(t *testing.T, e env) { te.startServer(&testServer{security: e.security}) defer te.tearDown() te.customCodec = nil - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() // Failure case, should not panic. @@ -5574,7 +4943,7 @@ func testSvrWriteStatusEarlyWrite(t *testing.T, e env) { } te.startServer(&testServer{security: e.security}) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) respParam := []*testpb.ResponseParameters{ { Size: int32(smallSize), @@ -5612,448 +4981,41 @@ func testSvrWriteStatusEarlyWrite(t *testing.T, e env) { } } -// The following functions with function name ending with TD indicates that they -// should be deleted after old service config API is deprecated and deleted. 
-func testServiceConfigSetupTD(t *testing.T, e env) (*test, chan grpc.ServiceConfig) { - te := newTest(t, e) - // We write before read. - ch := make(chan grpc.ServiceConfig, 1) - te.sc = ch - te.userAgent = testAppUA - te.declareLogNoise( - "transport: http2Client.notifyError got notified that the client transport was broken EOF", - "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", - "grpc: addrConn.resetTransport failed to create client transport: connection error", - "Failed to dial : context canceled; please retry.", - ) - return te, ch -} - -func (s) TestServiceConfigGetMethodConfigTD(t *testing.T) { - for _, e := range listTestEnv() { - testGetMethodConfigTD(t, e) - } -} - -func testGetMethodConfigTD(t *testing.T, e env) { - te, ch := testServiceConfigSetupTD(t, e) +// TestMalformedStreamMethod starts a test server and sends an RPC with a +// malformed method name. The server should respond with an UNIMPLEMENTED status +// code in this case. +func (s) TestMalformedStreamMethod(t *testing.T) { + const testMethod = "a-method-name-without-any-slashes" + te := newTest(t, tcpClearRREnv) + te.startServer(nil) defer te.tearDown() - mc1 := grpc.MethodConfig{ - WaitForReady: newBool(true), - Timeout: newDuration(time.Millisecond), - } - mc2 := grpc.MethodConfig{WaitForReady: newBool(false)} - m := make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc1 - m["/grpc.testing.TestService/"] = mc2 - sc := grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. 
- if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) - } - - m = make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/UnaryCall"] = mc1 - m["/grpc.testing.TestService/"] = mc2 - sc = grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - // Wait for the new service config to propagate. - for { - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - break - } - } - // The following RPCs are expected to become fail-fast. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) + err := te.clientConn().Invoke(ctx, testMethod, nil, nil) + if gotCode := status.Code(err); gotCode != codes.Unimplemented { + t.Fatalf("Invoke with method %q, got code %s, want %s", testMethod, gotCode, codes.Unimplemented) } } -func (s) TestServiceConfigWaitForReadyTD(t *testing.T) { - for _, e := range listTestEnv() { - testServiceConfigWaitForReadyTD(t, e) +func (s) TestMethodFromServerStream(t *testing.T) { + const testMethod = "/package.service/method" + e := tcpClearRREnv + te := newTest(t, e) + var method string + var ok bool + te.unknownHandler = func(srv interface{}, stream grpc.ServerStream) error { + method, ok = grpc.MethodFromServerStream(stream) + return nil } -} -func testServiceConfigWaitForReadyTD(t *testing.T, e env) { - te, ch := testServiceConfigSetupTD(t, e) + te.startServer(nil) defer te.tearDown() - - // Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds. 
- mc := grpc.MethodConfig{ - WaitForReady: newBool(false), - Timeout: newDuration(time.Millisecond), - } - m := make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc := grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) - } - if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) - } - - // Generate a service config update. - // Case2: Client API does not set failfast, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds. - mc.WaitForReady = newBool(true) - m = make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc = grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - - // Wait for the new service config to take effect. - mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") - for { - if !*mc.WaitForReady { - time.Sleep(100 * time.Millisecond) - mc = cc.GetMethodConfig("/grpc.testing.TestService/EmptyCall") - continue - } - break - } - // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. 
- if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) - } - if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) - } -} - -func (s) TestServiceConfigTimeoutTD(t *testing.T) { - for _, e := range listTestEnv() { - testServiceConfigTimeoutTD(t, e) - } -} - -func testServiceConfigTimeoutTD(t *testing.T, e env) { - te, ch := testServiceConfigSetupTD(t, e) - defer te.tearDown() - - // Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. - mc := grpc.MethodConfig{ - Timeout: newDuration(time.Hour), - } - m := make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc := grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - - cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) - // The following RPCs are expected to become non-fail-fast ones with 1ns deadline. - ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) - } - cancel() - ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond) - if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) - } - cancel() - - // Generate a service config update. - // Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. 
Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. - mc.Timeout = newDuration(time.Nanosecond) - m = make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/EmptyCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc = grpc.ServiceConfig{ - Methods: m, - } - ch <- sc - - // Wait for the new service config to take effect. - mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") - for { - if *mc.Timeout != time.Nanosecond { - time.Sleep(100 * time.Millisecond) - mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") - continue - } - break - } - - ctx, cancel = context.WithTimeout(context.Background(), time.Hour) - if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) - } - cancel() - - ctx, cancel = context.WithTimeout(context.Background(), time.Hour) - if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { - t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) - } - cancel() -} - -func (s) TestServiceConfigMaxMsgSizeTD(t *testing.T) { - for _, e := range listTestEnv() { - testServiceConfigMaxMsgSizeTD(t, e) - } -} - -func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) { - // Setting up values and objects shared across all test cases. 
- const smallSize = 1 - const largeSize = 1024 - const extraLargeSize = 2048 - - smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) - if err != nil { - t.Fatal(err) - } - largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) - if err != nil { - t.Fatal(err) - } - extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) - if err != nil { - t.Fatal(err) - } - - mc := grpc.MethodConfig{ - MaxReqSize: newInt(extraLargeSize), - MaxRespSize: newInt(extraLargeSize), - } - - m := make(map[string]grpc.MethodConfig) - m["/grpc.testing.TestService/UnaryCall"] = mc - m["/grpc.testing.TestService/FullDuplexCall"] = mc - sc := grpc.ServiceConfig{ - Methods: m, - } - // Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv). - te1, ch1 := testServiceConfigSetupTD(t, e) - te1.startServer(&testServer{security: e.security}) - defer te1.tearDown() - - ch1 <- sc - tc := testpb.NewTestServiceClient(te1.clientConn()) - - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseSize: int32(extraLargeSize), - Payload: smallPayload, - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - // Test for unary RPC recv. - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for unary RPC send. - req.Payload = extraLargePayload - req.ResponseSize = int32(smallSize) - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for streaming RPC recv. 
- respParam := []*testpb.ResponseParameters{ - { - Size: int32(extraLargeSize), - }, - } - sreq := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: smallPayload, - } - stream, err := tc.FullDuplexCall(te1.ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) - } - - // Test for streaming RPC send. - respParam[0].Size = int32(smallSize) - sreq.Payload = extraLargePayload - stream, err = tc.FullDuplexCall(te1.ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) - } - - // Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). - te2, ch2 := testServiceConfigSetupTD(t, e) - te2.maxClientReceiveMsgSize = newInt(1024) - te2.maxClientSendMsgSize = newInt(1024) - te2.startServer(&testServer{security: e.security}) - defer te2.tearDown() - ch2 <- sc - tc = testpb.NewTestServiceClient(te2.clientConn()) - - // Test for unary RPC recv. - req.Payload = smallPayload - req.ResponseSize = int32(largeSize) - - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for unary RPC send. 
- req.Payload = largePayload - req.ResponseSize = int32(smallSize) - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for streaming RPC recv. - stream, err = tc.FullDuplexCall(te2.ctx) - respParam[0].Size = int32(largeSize) - sreq.Payload = smallPayload - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) - } - - // Test for streaming RPC send. - respParam[0].Size = int32(smallSize) - sreq.Payload = largePayload - stream, err = tc.FullDuplexCall(te2.ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) - } - - // Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). - te3, ch3 := testServiceConfigSetupTD(t, e) - te3.maxClientReceiveMsgSize = newInt(4096) - te3.maxClientSendMsgSize = newInt(4096) - te3.startServer(&testServer{security: e.security}) - defer te3.tearDown() - ch3 <- sc - tc = testpb.NewTestServiceClient(te3.clientConn()) - - // Test for unary RPC recv. 
- req.Payload = smallPayload - req.ResponseSize = int32(largeSize) - - if _, err := tc.UnaryCall(ctx, req); err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) - } - - req.ResponseSize = int32(extraLargeSize) - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for unary RPC send. - req.Payload = largePayload - req.ResponseSize = int32(smallSize) - if _, err := tc.UnaryCall(ctx, req); err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) - } - - req.Payload = extraLargePayload - if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) - } - - // Test for streaming RPC recv. - stream, err = tc.FullDuplexCall(te3.ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - respParam[0].Size = int32(largeSize) - sreq.Payload = smallPayload - - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = _, %v, want ", stream, err) - } - - respParam[0].Size = int32(extraLargeSize) - - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) - } - - // Test for streaming RPC send. 
- respParam[0].Size = int32(smallSize) - sreq.Payload = largePayload - stream, err = tc.FullDuplexCall(te3.ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - sreq.Payload = extraLargePayload - if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { - t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) - } -} - -func (s) TestMethodFromServerStream(t *testing.T) { - const testMethod = "/package.service/method" - e := tcpClearRREnv - te := newTest(t, e) - var method string - var ok bool - te.unknownHandler = func(srv interface{}, stream grpc.ServerStream) error { - method, ok = grpc.MethodFromServerStream(stream) - return nil - } - - te.startServer(nil) - defer te.tearDown() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - _ = te.clientConn().Invoke(ctx, testMethod, nil, nil) - if !ok || method != testMethod { - t.Fatalf("Invoke with method %q, got %q, %v, want %q, true", testMethod, method, ok, testMethod) + _ = te.clientConn().Invoke(ctx, testMethod, nil, nil) + if !ok || method != testMethod { + t.Fatalf("Invoke with method %q, got %q, %v, want %q, true", testMethod, method, ok, testMethod) } } @@ -6113,7 +5075,7 @@ func (s) TestInterceptorCanAccessCallOptions(t *testing.T) { grpc.WaitForReady(true), grpc.MaxCallRecvMsgSize(1010), } - tc := testpb.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...))) + tc := testgrpc.NewTestServiceClient(te.clientConn(grpc.WithDefaultCallOptions(defaults...))) var headers metadata.MD var trailers metadata.MD @@ -6161,67 +5123,6 @@ func (s) TestInterceptorCanAccessCallOptions(t *testing.T) { } } -func (s) TestCompressorRegister(t *testing.T) { - for _, e := range listTestEnv() { - testCompressorRegister(t, e) - } -} - -func 
testCompressorRegister(t *testing.T, e env) { - te := newTest(t, e) - te.clientCompression = false - te.serverCompression = false - te.clientUseCompression = true - - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) - - // Unary call - const argSize = 271828 - const respSize = 314159 - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, argSize) - if err != nil { - t.Fatal(err) - } - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseSize: respSize, - Payload: payload, - } - ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("something", "something")) - if _, err := tc.UnaryCall(ctx, req); err != nil { - t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, ", err) - } - // Streaming RPC - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - stream, err := tc.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) - } - respParam := []*testpb.ResponseParameters{ - { - Size: 31415, - }, - } - payload, err = newPayload(testpb.PayloadType_COMPRESSABLE, int32(31415)) - if err != nil { - t.Fatal(err) - } - sreq := &testpb.StreamingOutputCallRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - ResponseParameters: respParam, - Payload: payload, - } - if err := stream.Send(sreq); err != nil { - t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) - } - if _, err := stream.Recv(); err != nil { - t.Fatalf("%v.Recv() = %v, want ", stream, err) - } -} - func (s) TestServeExitsWhenListenerClosed(t *testing.T) { ss := &stubserver.StubServer{ EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { @@ -6231,7 +5132,7 @@ func (s) TestServeExitsWhenListenerClosed(t *testing.T) { s := grpc.NewServer() defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != 
nil { @@ -6244,12 +5145,12 @@ func (s) TestServeExitsWhenListenerClosed(t *testing.T) { close(done) }() - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock()) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Failed to dial server: %v", err) } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -6298,7 +5199,7 @@ func (s) TestStatusInvalidUTF8Message(t *testing.T) { // will fail to marshal the status because of the invalid utf8 message. Details // will be dropped when sending. func (s) TestStatusInvalidUTF8Details(t *testing.T) { - grpctest.TLogger.ExpectError("transport: failed to marshal rpc status") + grpctest.TLogger.ExpectError("Failed to marshal rpc status") var ( origMsg = string([]byte{0xff, 0xfe, 0xfd}) @@ -6334,49 +5235,6 @@ func (s) TestStatusInvalidUTF8Details(t *testing.T) { } } -func (s) TestClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T) { - for _, e := range listTestEnv() { - if e.httpHandler { - continue - } - testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t, e) - } -} - -func testClientDoesntDeadlockWhileWritingErrornousLargeMessages(t *testing.T, e env) { - te := newTest(t, e) - te.userAgent = testAppUA - smallSize := 1024 - te.maxServerReceiveMsgSize = &smallSize - te.startServer(&testServer{security: e.security}) - defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) - payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1048576) - if err != nil { - t.Fatal(err) - } - req := &testpb.SimpleRequest{ - ResponseType: testpb.PayloadType_COMPRESSABLE, - Payload: payload, - } - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for j := 0; j < 100; j++ 
{ - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10)) - defer cancel() - if _, err := tc.UnaryCall(ctx, req); status.Code(err) != codes.ResourceExhausted { - t.Errorf("TestService/UnaryCall(_,_) = _. %v, want code: %s", err, codes.ResourceExhausted) - return - } - } - }() - } - wg.Wait() -} - func (s) TestRPCTimeout(t *testing.T) { for _, e := range listTestEnv() { testRPCTimeout(t, e) @@ -6389,7 +5247,7 @@ func testRPCTimeout(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) const argSize = 2718 const respSize = 314 @@ -6426,7 +5284,7 @@ func (s) TestDisabledIOBuffers(t *testing.T) { } ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for { in, err := stream.Recv() if err == io.EOF { @@ -6450,27 +5308,25 @@ func (s) TestDisabledIOBuffers(t *testing.T) { } s := grpc.NewServer(grpc.WriteBufferSize(0), grpc.ReadBufferSize(0)) - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to create listener: %v", err) } - done := make(chan struct{}) go func() { s.Serve(lis) - close(done) }() defer s.Stop() dctx, dcancel := context.WithTimeout(context.Background(), 5*time.Second) defer dcancel() - cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithInsecure(), grpc.WithBlock(), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0)) + cc, err := grpc.DialContext(dctx, lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithWriteBufferSize(0), grpc.WithReadBufferSize(0)) if err != nil { t.Fatalf("Failed to dial server") } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := c.FullDuplexCall(ctx, grpc.WaitForReady(true)) @@ -6512,7 +5368,7 @@ func testServerMaxHeaderListSizeClientUserViolation(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() metadata.AppendToOutgoingContext(ctx, "oversize", string(make([]byte, 216))) @@ -6544,7 +5400,7 @@ func testClientMaxHeaderListSizeServerUserViolation(t *testing.T, e env) { defer te.tearDown() cc := te.clientConn() - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() var err error @@ -6575,7 +5431,7 @@ func testServerMaxHeaderListSizeClientIntentionalViolation(t *testing.T, e env) defer te.tearDown() cc, dw := te.clientConnWithConnControl() - tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} + tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)} ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := tc.FullDuplexCall(ctx) @@ -6616,7 +5472,7 @@ func testClientMaxHeaderListSizeServerIntentionalViolation(t *testing.T, e env) lw := te.startServerWithConnControl(&testServer{security: e.security, setHeaderOnly: true}) defer te.tearDown() cc, _ := te.clientConnWithConnControl() - tc := &testServiceClientWrapper{TestServiceClient: testpb.NewTestServiceClient(cc)} + tc := &testServiceClientWrapper{TestServiceClient: testgrpc.NewTestServiceClient(cc)} ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() stream, err := tc.FullDuplexCall(ctx) @@ -6663,16 +5519,16 @@ func (s) TestNetPipeConn(t *testing.T) { ts := &funcServer{unaryCall: func(ctx context.Context, in 
*testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{}, nil }} - testpb.RegisterTestServiceServer(s, ts) + testgrpc.RegisterTestServiceServer(s, ts) go s.Serve(pl) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithDialer(pl.Dialer())) + cc, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDialer(pl.Dialer())) if err != nil { t.Fatalf("Error creating client: %v", err) } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) } @@ -6691,7 +5547,7 @@ func testLargeTimeout(t *testing.T, e env) { ts := &funcServer{} te.startServer(ts) defer te.tearDown() - tc := testpb.NewTestServiceClient(te.clientConn()) + tc := testgrpc.NewTestServiceClient(te.clientConn()) timeouts := []time.Duration{ time.Duration(math.MaxInt64), // will be (correctly) converted to @@ -6720,92 +5576,6 @@ func testLargeTimeout(t *testing.T, e env) { } } -// Proxies typically send GO_AWAY followed by connection closure a minute or so later. This -// test ensures that the connection is re-created after GO_AWAY and not affected by the -// subsequent (old) connection closure. -func (s) TestGoAwayThenClose(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - - lis1, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Error while listening. Err: %v", err) - } - s1 := grpc.NewServer() - defer s1.Stop() - ts := &funcServer{ - unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - return &testpb.SimpleResponse{}, nil - }, - fullDuplexCall: func(stream testpb.TestService_FullDuplexCallServer) error { - // Wait forever. 
- _, err := stream.Recv() - if err == nil { - t.Error("expected to never receive any message") - } - return err - }, - } - testpb.RegisterTestServiceServer(s1, ts) - go s1.Serve(lis1) - - conn2Established := grpcsync.NewEvent() - lis2, err := listenWithNotifyingListener("tcp", "localhost:0", conn2Established) - if err != nil { - t.Fatalf("Error while listening. Err: %v", err) - } - s2 := grpc.NewServer() - defer s2.Stop() - testpb.RegisterTestServiceServer(s2, ts) - go s2.Serve(lis2) - - r := manual.NewBuilderWithScheme("whatever") - r.InitialState(resolver.State{Addresses: []resolver.Address{ - {Addr: lis1.Addr().String()}, - }}) - cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithInsecure()) - if err != nil { - t.Fatalf("Error creating client: %v", err) - } - defer cc.Close() - - client := testpb.NewTestServiceClient(cc) - - // Should go on connection 1. We use a long-lived RPC because it will cause GracefulStop to send GO_AWAY, but the - // connection doesn't get closed until the server stops and the client receives. - stream, err := client.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err) - } - - r.UpdateState(resolver.State{Addresses: []resolver.Address{ - {Addr: lis1.Addr().String()}, - {Addr: lis2.Addr().String()}, - }}) - - // Send GO_AWAY to connection 1. - go s1.GracefulStop() - - // Wait for connection 2 to be established. - <-conn2Established.Done() - - // Close connection 1. - s1.Stop() - - // Wait for client to close. - _, err = stream.Recv() - if err == nil { - t.Fatal("expected the stream to die, but got a successful Recv") - } - - // Do a bunch of RPCs, make sure it stays stable. These should go to connection 2. 
- for i := 0; i < 10; i++ { - if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { - t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) - } - } -} - func listenWithNotifyingListener(network, address string, event *grpcsync.Event) (net.Listener, error) { lis, err := net.Listen(network, address) if err != nil { @@ -6831,9 +5601,8 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { r := manual.NewBuilderWithScheme("whatever") te.resolverScheme = r.Scheme() - te.nonBlockingDial = true cc := te.clientConn(grpc.WithResolvers(r)) - tc := testpb.NewTestServiceClient(cc) + tc := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) defer cancel() @@ -6848,7 +5617,7 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { time.Sleep(time.Second) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: te.srvAddr}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "methodConfig": [ { "name": [ @@ -6865,7 +5634,11 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { // We wait a second before providing a service config and resolving // addresses. So this will wait for that and then honor the // maxRequestMessageBytes it contains. - if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{ResponseType: testpb.PayloadType_UNCOMPRESSABLE}); status.Code(err) != codes.ResourceExhausted { + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, 1) + if err != nil { + t.Fatal(err) + } + if _, err := tc.UnaryCall(ctx, &testpb.SimpleRequest{Payload: payload}); status.Code(err) != codes.ResourceExhausted { t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, nil", err) } if got := ctx.Err(); got != nil { @@ -6876,128 +5649,18 @@ func (s) TestRPCWaitsForResolver(t *testing.T) { } } -func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) { - // Non-gRPC content-type fallback path. 
- for httpCode := range transport.HTTPStatusConvTab { - doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{ - ":status", fmt.Sprintf("%d", httpCode), - "content-type", "text/html", // non-gRPC content type to switch to HTTP mode. - "grpc-status", "1", // Make up a gRPC status error - "grpc-status-details-bin", "???", // Make up a gRPC field parsing error - }) - } - - // Missing content-type fallback path. - for httpCode := range transport.HTTPStatusConvTab { - doHTTPHeaderTest(t, transport.HTTPStatusConvTab[int(httpCode)], []string{ - ":status", fmt.Sprintf("%d", httpCode), - // Omitting content type to switch to HTTP mode. - "grpc-status", "1", // Make up a gRPC status error - "grpc-status-details-bin", "???", // Make up a gRPC field parsing error - }) - } - - // Malformed HTTP status when fallback. - doHTTPHeaderTest(t, codes.Internal, []string{ - ":status", "abc", - // Omitting content type to switch to HTTP mode. - "grpc-status", "1", // Make up a gRPC status error - "grpc-status-details-bin", "???", // Make up a gRPC field parsing error - }) -} - -// Testing erroneous ResponseHeader or Trailers-only (delivered in the first HEADERS frame). -func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) { - for _, test := range []struct { - header []string - errCode codes.Code - }{ - { - // missing gRPC status. - header: []string{ - ":status", "403", - "content-type", "application/grpc", - }, - errCode: codes.Unknown, - }, - { - // malformed grpc-status. - header: []string{ - ":status", "502", - "content-type", "application/grpc", - "grpc-status", "abc", - }, - errCode: codes.Internal, - }, - { - // Malformed grpc-tags-bin field. - header: []string{ - ":status", "502", - "content-type", "application/grpc", - "grpc-status", "0", - "grpc-tags-bin", "???", - }, - errCode: codes.Internal, - }, - { - // gRPC status error. 
- header: []string{ - ":status", "502", - "content-type", "application/grpc", - "grpc-status", "3", - }, - errCode: codes.InvalidArgument, - }, - } { - doHTTPHeaderTest(t, test.errCode, test.header) - } -} - -// Testing non-Trailers-only Trailers (delievered in second HEADERS frame) -func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { - for _, test := range []struct { - responseHeader []string - trailer []string - errCode codes.Code - }{ - { - responseHeader: []string{ - ":status", "200", - "content-type", "application/grpc", - }, - trailer: []string{ - // trailer missing grpc-status - ":status", "502", - }, - errCode: codes.Unknown, - }, - { - responseHeader: []string{ - ":status", "404", - "content-type", "application/grpc", - }, - trailer: []string{ - // malformed grpc-status-details-bin field - "grpc-status", "0", - "grpc-status-details-bin", "????", - }, - errCode: codes.Internal, - }, - } { - doHTTPHeaderTest(t, test.errCode, test.responseHeader, test.trailer) - } -} - -func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) { - header := []string{ - ":status", "200", - "content-type", "application/grpc", - } - doHTTPHeaderTest(t, codes.Internal, header, header, header) +type httpServerResponse struct { + headers [][]string + payload []byte + trailers [][]string } type httpServer struct { - headerFields [][]string + // If waitForEndStream is set, wait for the client to send a frame with end + // stream in it before sending a response/refused stream. 
+ waitForEndStream bool + refuseStream func(uint32) bool + responses []httpServerResponse } func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields []string, endStream bool) error { @@ -7021,6 +5684,10 @@ func (s *httpServer) writeHeader(framer *http2.Framer, sid uint32, headerFields }) } +func (s *httpServer) writePayload(framer *http2.Framer, sid uint32, payload []byte) error { + return framer.WriteData(sid, false, payload) +} + func (s *httpServer) start(t *testing.T, lis net.Listener) { // Launch an HTTP server to send back header. go func() { @@ -7043,66 +5710,71 @@ func (s *httpServer) start(t *testing.T, lis net.Listener) { return } writer.Flush() // necessary since client is expecting preface before declaring connection fully setup. - var sid uint32 - // Read frames until a header is received. - for { - frame, err := framer.ReadFrame() - if err != nil { - t.Errorf("Error at server-side while reading frame. Err: %v", err) - return + // Loop until framer returns possible conn closed errors. + for requestNum := 0; ; requestNum = (requestNum + 1) % len(s.responses) { + // Read frames until a header is received. + for { + frame, err := framer.ReadFrame() + if err != nil { + if !isConnClosedErr(err) { + t.Errorf("Error at server-side while reading frame. got: %q, want: rpc error containing substring %q OR %q", err, possibleConnResetMsg, possibleEOFMsg) + } + return + } + sid = 0 + switch fr := frame.(type) { + case *http2.HeadersFrame: + // Respond after this if we are not waiting for an end + // stream or if this frame ends it. + if !s.waitForEndStream || fr.StreamEnded() { + sid = fr.Header().StreamID + } + + case *http2.DataFrame: + // Respond after this if we were waiting for an end stream + // and this frame ends it. (If we were not waiting for an + // end stream, this stream was already responded to when + // the headers were received.) 
+ if s.waitForEndStream && fr.StreamEnded() { + sid = fr.Header().StreamID + } + } + if sid != 0 { + if s.refuseStream == nil || !s.refuseStream(sid) { + break + } + framer.WriteRSTStream(sid, http2.ErrCodeRefusedStream) + writer.Flush() + } } - if hframe, ok := frame.(*http2.HeadersFrame); ok { - sid = hframe.Header().StreamID - break + + response := s.responses[requestNum] + for _, header := range response.headers { + if err = s.writeHeader(framer, sid, header, false); err != nil { + t.Errorf("Error at server-side while writing headers. Err: %v", err) + return + } + writer.Flush() } - } - for i, headers := range s.headerFields { - if err = s.writeHeader(framer, sid, headers, i == len(s.headerFields)-1); err != nil { - t.Errorf("Error at server-side while writing headers. Err: %v", err) - return + if response.payload != nil { + if err = s.writePayload(framer, sid, response.payload); err != nil { + t.Errorf("Error at server-side while writing payload. Err: %v", err) + return + } + writer.Flush() + } + for i, trailer := range response.trailers { + if err = s.writeHeader(framer, sid, trailer, i == len(response.trailers)-1); err != nil { + t.Errorf("Error at server-side while writing trailers. Err: %v", err) + return + } + writer.Flush() } - writer.Flush() } }() } -func doHTTPHeaderTest(t *testing.T, errCode codes.Code, headerFields ...[]string) { - t.Helper() - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Failed to listen. 
Err: %v", err) - } - defer lis.Close() - server := &httpServer{ - headerFields: headerFields, - } - server.start(t, lis) - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) - if err != nil { - t.Fatalf("failed to dial due to err: %v", err) - } - defer cc.Close() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - client := testpb.NewTestServiceClient(cc) - stream, err := client.FullDuplexCall(ctx) - if err != nil { - t.Fatalf("error creating stream due to err: %v", err) - } - if _, err := stream.Recv(); err == nil || status.Code(err) != errCode { - t.Fatalf("stream.Recv() = _, %v, want error code: %v", err, errCode) - } -} - -func parseCfg(r *manual.Resolver, s string) *serviceconfig.ParseResult { - g := r.CC.ParseServiceConfig(s) - if g.Err != nil { - panic(fmt.Sprintf("Error parsing config %q: %v", s, g.Err)) - } - return g -} - func (s) TestClientCancellationPropagatesUnary(t *testing.T) { wg := &sync.WaitGroup{} called, done := make(chan struct{}), make(chan struct{}) @@ -7147,57 +5819,11 @@ func (s) TestClientCancellationPropagatesUnary(t *testing.T) { wg.Wait() } -type badGzipCompressor struct{} - -func (badGzipCompressor) Do(w io.Writer, p []byte) error { - buf := &bytes.Buffer{} - gzw := gzip.NewWriter(buf) - if _, err := gzw.Write(p); err != nil { - return err - } - err := gzw.Close() - bs := buf.Bytes() - if len(bs) >= 6 { - bs[len(bs)-6] ^= 1 // modify checksum at end by 1 byte - } - w.Write(bs) - return err -} - -func (badGzipCompressor) Type() string { - return "gzip" -} - -func (s) TestGzipBadChecksum(t *testing.T) { - ss := &stubserver.StubServer{ - UnaryCallF: func(ctx context.Context, _ *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - return &testpb.SimpleResponse{}, nil - }, - } - if err := ss.Start(nil, grpc.WithCompressor(badGzipCompressor{})); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - - ctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - p, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(1024)) - if err != nil { - t.Fatalf("Unexpected error from newPayload: %v", err) - } - if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{Payload: p}); err == nil || - status.Code(err) != codes.Internal || - !strings.Contains(status.Convert(err).Message(), gzip.ErrChecksum.Error()) { - t.Errorf("ss.Client.UnaryCall(_) = _, %v\n\twant: _, status(codes.Internal, contains %q)", err, gzip.ErrChecksum) - } -} - // When an RPC is canceled, it's possible that the last Recv() returns before // all call options' after are executed. func (s) TestCanceledRPCCallOptionRace(t *testing.T) { ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { err := stream.Send(&testpb.StreamingOutputCallResponse{}) if err != nil { return err @@ -7251,3 +5877,488 @@ func (s) TestCanceledRPCCallOptionRace(t *testing.T) { } wg.Wait() } + +func (s) TestClientSettingsFloodCloseConn(t *testing.T) { + // Tests that the server properly closes its transport if the client floods + // settings frames and then closes the connection. + + // Minimize buffer sizes to stimulate failure condition more quickly. + s := grpc.NewServer(grpc.WriteBufferSize(20)) + l := bufconn.Listen(20) + go s.Serve(l) + + // Dial our server and handshake. 
+ conn, err := l.Dial() + if err != nil { + t.Fatalf("Error dialing bufconn: %v", err) + } + + n, err := conn.Write([]byte(http2.ClientPreface)) + if err != nil || n != len(http2.ClientPreface) { + t.Fatalf("Error writing client preface: %v, %v", n, err) + } + + fr := http2.NewFramer(conn, conn) + f, err := fr.ReadFrame() + if err != nil { + t.Fatalf("Error reading initial settings frame: %v", err) + } + if _, ok := f.(*http2.SettingsFrame); ok { + if err := fr.WriteSettingsAck(); err != nil { + t.Fatalf("Error writing settings ack: %v", err) + } + } else { + t.Fatalf("Error reading initial settings frame: type=%T", f) + } + + // Confirm settings can be written, and that an ack is read. + if err = fr.WriteSettings(); err != nil { + t.Fatalf("Error writing settings frame: %v", err) + } + if f, err = fr.ReadFrame(); err != nil { + t.Fatalf("Error reading frame: %v", err) + } + if sf, ok := f.(*http2.SettingsFrame); !ok || !sf.IsAck() { + t.Fatalf("Unexpected frame: %v", f) + } + + // Flood settings frames until a timeout occurs, indiciating the server has + // stopped reading from the connection, then close the conn. + for { + conn.SetWriteDeadline(time.Now().Add(50 * time.Millisecond)) + if err := fr.WriteSettings(); err != nil { + if to, ok := err.(interface{ Timeout() bool }); !ok || !to.Timeout() { + t.Fatalf("Received unexpected write error: %v", err) + } + break + } + } + conn.Close() + + // If the server does not handle this situation correctly, it will never + // close the transport. This is because its loopyWriter.run() will have + // exited, and thus not handle the goAway the draining process initiates. + // Also, we would see a goroutine leak in this case, as the reader would be + // blocked on the controlBuf's throttle() method indefinitely. 
+ + timer := time.AfterFunc(5*time.Second, func() { + t.Errorf("Timeout waiting for GracefulStop to return") + s.Stop() + }) + s.GracefulStop() + timer.Stop() +} + +func unaryInterceptorVerifyConn(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + conn := transport.GetConnection(ctx) + if conn == nil { + return nil, status.Error(codes.NotFound, "connection was not in context") + } + return nil, status.Error(codes.OK, "") +} + +// TestUnaryServerInterceptorGetsConnection tests whether the accepted conn on +// the server gets to any unary interceptors on the server side. +func (s) TestUnaryServerInterceptorGetsConnection(t *testing.T) { + ss := &stubserver.StubServer{} + if err := ss.Start([]grpc.ServerOption{grpc.UnaryInterceptor(unaryInterceptorVerifyConn)}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { + t.Fatalf("ss.Client.EmptyCall(_, _) = _, %v, want _, error code %s", err, codes.OK) + } +} + +func streamingInterceptorVerifyConn(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + conn := transport.GetConnection(ss.Context()) + if conn == nil { + return status.Error(codes.NotFound, "connection was not in context") + } + return status.Error(codes.OK, "") +} + +// TestStreamingServerInterceptorGetsConnection tests whether the accepted conn on +// the server gets to any streaming interceptors on the server side. 
+func (s) TestStreamingServerInterceptorGetsConnection(t *testing.T) { + ss := &stubserver.StubServer{} + if err := ss.Start([]grpc.ServerOption{grpc.StreamInterceptor(streamingInterceptorVerifyConn)}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + s, err := ss.Client.StreamingOutputCall(ctx, &testpb.StreamingOutputCallRequest{}) + if err != nil { + t.Fatalf("ss.Client.StreamingOutputCall(_) = _, %v, want _, ", err) + } + if _, err := s.Recv(); err != io.EOF { + t.Fatalf("ss.Client.StreamingInputCall(_) = _, %v, want _, %v", err, io.EOF) + } +} + +// unaryInterceptorVerifyAuthority verifies there is an unambiguous :authority +// once the request gets to an interceptor. An unambiguous :authority is defined +// as at most a single :authority header, and no host header according to A41. +func unaryInterceptorVerifyAuthority(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, status.Error(codes.NotFound, "metadata was not in context") + } + authority := md.Get(":authority") + if len(authority) > 1 { // Should be an unambiguous authority by the time it gets to interceptor. + return nil, status.Error(codes.NotFound, ":authority value had more than one value") + } + // Host header shouldn't be present by the time it gets to the interceptor + // level (should either be renamed to :authority or explicitly deleted). + host := md.Get("host") + if len(host) != 0 { + return nil, status.Error(codes.NotFound, "host header should not be present in metadata") + } + // Pass back the authority for verification on client - NotFound so + // grpc-message will be available to read for verification. + if len(authority) == 0 { + // Represent no :authority header present with an empty string. 
+ return nil, status.Error(codes.NotFound, "") + } + return nil, status.Error(codes.NotFound, authority[0]) +} + +// TestAuthorityHeader tests that the eventual :authority that reaches the grpc +// layer is unambiguous due to logic added in A41. +func (s) TestAuthorityHeader(t *testing.T) { + tests := []struct { + name string + headers []string + wantAuthority string + }{ + // "If :authority is missing, Host must be renamed to :authority." - A41 + { + name: "Missing :authority", + // Codepath triggered by incoming headers with no authority but with + // a host. + headers: []string{ + ":method", "POST", + ":path", "/grpc.testing.TestService/UnaryCall", + "content-type", "application/grpc", + "te", "trailers", + "host", "localhost", + }, + wantAuthority: "localhost", + }, + { + name: "Missing :authority and host", + // Codepath triggered by incoming headers with no :authority and no + // host. + headers: []string{ + ":method", "POST", + ":path", "/grpc.testing.TestService/UnaryCall", + "content-type", "application/grpc", + "te", "trailers", + }, + wantAuthority: "", + }, + // "If :authority is present, Host must be discarded." - A41 + { + name: ":authority and host present", + // Codepath triggered by incoming headers with both an authority + // header and a host header. 
+ headers: []string{ + ":method", "POST", + ":path", "/grpc.testing.TestService/UnaryCall", + ":authority", "localhost", + "content-type", "application/grpc", + "host", "localhost2", + }, + wantAuthority: "localhost", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + te := newTest(t, tcpClearRREnv) + ts := &funcServer{unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }} + te.unaryServerInt = unaryInterceptorVerifyAuthority + te.startServer(ts) + defer te.tearDown() + success := testutils.NewChannel() + te.withServerTester(func(st *serverTester) { + st.writeHeaders(http2.HeadersFrameParam{ + StreamID: 1, + BlockFragment: st.encodeHeader(test.headers...), + EndStream: false, + EndHeaders: true, + }) + st.writeData(1, true, []byte{0, 0, 0, 0, 0}) + + for { + frame := st.wantAnyFrame() + f, ok := frame.(*http2.MetaHeadersFrame) + if !ok { + continue + } + for _, header := range f.Fields { + if header.Name == "grpc-message" { + success.Send(header.Value) + return + } + } + } + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotAuthority, err := success.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving from channel: %v", err) + } + if gotAuthority != test.wantAuthority { + t.Fatalf("gotAuthority: %v, wantAuthority %v", gotAuthority, test.wantAuthority) + } + }) + } +} + +// wrapCloseListener tracks Accepts/Closes and maintains a counter of the +// number of open connections. +type wrapCloseListener struct { + net.Listener + connsOpen int32 +} + +// wrapCloseListener is returned by wrapCloseListener.Accept and decrements its +// connsOpen when Close is called. 
+type wrapCloseConn struct { + net.Conn + lis *wrapCloseListener + closeOnce sync.Once +} + +func (w *wrapCloseListener) Accept() (net.Conn, error) { + conn, err := w.Listener.Accept() + if err != nil { + return nil, err + } + atomic.AddInt32(&w.connsOpen, 1) + return &wrapCloseConn{Conn: conn, lis: w}, nil +} + +func (w *wrapCloseConn) Close() error { + defer w.closeOnce.Do(func() { atomic.AddInt32(&w.lis.connsOpen, -1) }) + return w.Conn.Close() +} + +// TestServerClosesConn ensures conn.Close is always closed even if the client +// doesn't complete the HTTP/2 handshake. +func (s) TestServerClosesConn(t *testing.T) { + lis := bufconn.Listen(20) + wrapLis := &wrapCloseListener{Listener: lis} + + s := grpc.NewServer() + go s.Serve(wrapLis) + defer s.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + for i := 0; i < 10; i++ { + conn, err := lis.DialContext(ctx) + if err != nil { + t.Fatalf("Dial = _, %v; want _, nil", err) + } + conn.Close() + } + for ctx.Err() == nil { + if atomic.LoadInt32(&wrapLis.connsOpen) == 0 { + return + } + time.Sleep(50 * time.Millisecond) + } + t.Fatalf("timed out waiting for conns to be closed by server; still open: %v", atomic.LoadInt32(&wrapLis.connsOpen)) +} + +// TestNilStatsHandler ensures we do not panic as a result of a nil stats +// handler. 
+func (s) TestNilStatsHandler(t *testing.T) { + grpctest.TLogger.ExpectErrorN("ignoring nil parameter", 2) + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + } + if err := ss.Start([]grpc.ServerOption{grpc.StatsHandler(nil)}, grpc.WithStatsHandler(nil)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } +} + +// TestUnexpectedEOF tests a scenario where a client invokes two unary RPC +// calls. The first call receives a payload which exceeds max grpc receive +// message length, and the second gets a large response. This second RPC should +// not fail with unexpected.EOF. +func (s) TestUnexpectedEOF(t *testing.T) { + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{ + Payload: &testpb.Payload{ + Body: bytes.Repeat([]byte("a"), int(in.ResponseSize)), + }, + }, nil + }, + } + if err := ss.Start([]grpc.ServerOption{}); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for i := 0; i < 10; i++ { + // exceeds grpc.DefaultMaxRecvMessageSize, this should error with + // RESOURCE_EXHAUSTED error. 
+ _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{ResponseSize: 4194304}) + if code := status.Code(err); code != codes.ResourceExhausted { + t.Fatalf("UnaryCall RPC returned error: %v, want status code %v", err, codes.ResourceExhausted) + } + // Larger response that doesn't exceed DefaultMaxRecvMessageSize, this + // should work normally. + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{ResponseSize: 275075}); err != nil { + t.Fatalf("UnaryCall RPC failed: %v", err) + } + } +} + +// TestRecvWhileReturningStatus performs a Recv in a service handler while the +// handler returns its status. A race condition could result in the server +// sending the first headers frame without the HTTP :status header. This can +// happen when the failed Recv (due to the handler returning) and the handler's +// status both attempt to write the status, which would be the first headers +// frame sent, simultaneously. +func (s) TestRecvWhileReturningStatus(t *testing.T) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + // The client never sends, so this Recv blocks until the server + // returns and causes stream operations to return errors. 
+ go stream.Recv() + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + for i := 0; i < 100; i++ { + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Error while creating stream: %v", err) + } + if _, err := stream.Recv(); err != io.EOF { + t.Fatalf("stream.Recv() = %v, want io.EOF", err) + } + } +} + +type mockBinaryLogger struct { + mml *mockMethodLogger +} + +func newMockBinaryLogger() *mockBinaryLogger { + return &mockBinaryLogger{ + mml: &mockMethodLogger{}, + } +} + +func (mbl *mockBinaryLogger) GetMethodLogger(string) binarylog.MethodLogger { + return mbl.mml +} + +type mockMethodLogger struct { + events uint64 +} + +func (mml *mockMethodLogger) Log(context.Context, binarylog.LogEntryConfig) { + atomic.AddUint64(&mml.events, 1) +} + +// TestGlobalBinaryLoggingOptions tests the binary logging options for client +// and server side. The test configures a binary logger to be plumbed into every +// created ClientConn and server. It then makes a unary RPC call, and a +// streaming RPC call. A certain amount of logging calls should happen as a +// result of the stream operations on each of these calls. 
+func (s) TestGlobalBinaryLoggingOptions(t *testing.T) { + csbl := newMockBinaryLogger() + ssbl := newMockBinaryLogger() + + internal.AddGlobalDialOptions.(func(opt ...grpc.DialOption))(internal.WithBinaryLogger.(func(bl binarylog.Logger) grpc.DialOption)(csbl)) + internal.AddGlobalServerOptions.(func(opt ...grpc.ServerOption))(internal.BinaryLogger.(func(bl binarylog.Logger) grpc.ServerOption)(ssbl)) + defer func() { + internal.ClearGlobalDialOptions() + internal.ClearGlobalServerOptions() + }() + ss := &stubserver.StubServer{ + UnaryCallF: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + for { + _, err := stream.Recv() + if err == io.EOF { + return nil + } + } + }, + } + + // No client or server options specified, because should pick up configured + // global options. + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Make a Unary RPC. This should cause Log calls on the MethodLogger. + if _, err := ss.Client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("Unexpected error from UnaryCall: %v", err) + } + if csbl.mml.events != 5 { + t.Fatalf("want 5 client side binary logging events, got %v", csbl.mml.events) + } + if ssbl.mml.events != 5 { + t.Fatalf("want 5 server side binary logging events, got %v", ssbl.mml.events) + } + + // Make a streaming RPC. This should cause Log calls on the MethodLogger. 
+ stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + stream.CloseSend() + if _, err = stream.Recv(); err != io.EOF { + t.Fatalf("unexpected error: %v, expected an EOF error", err) + } + + if csbl.mml.events != 8 { + t.Fatalf("want 8 client side binary logging events, got %v", csbl.mml.events) + } + if ssbl.mml.events != 8 { + t.Fatalf("want 8 server side binary logging events, got %v", ssbl.mml.events) + } +} diff --git a/test/go_vet/vet.go b/test/go_vet/vet.go deleted file mode 100644 index 475e8d683fc3..000000000000 --- a/test/go_vet/vet.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// vet checks whether files that are supposed to be built on appengine running -// Go 1.10 or earlier import an unsupported package (e.g. "unsafe", "syscall"). 
-package main - -import ( - "fmt" - "go/build" - "os" -) - -func main() { - fail := false - b := build.Default - b.BuildTags = []string{"appengine", "appenginevm"} - argsWithoutProg := os.Args[1:] - for _, dir := range argsWithoutProg { - p, err := b.Import(".", dir, 0) - if _, ok := err.(*build.NoGoError); ok { - continue - } else if err != nil { - fmt.Printf("build.Import failed due to %v\n", err) - fail = true - continue - } - for _, pkg := range p.Imports { - if pkg == "syscall" || pkg == "unsafe" { - fmt.Printf("Package %s/%s importing %s package without appengine build tag is NOT ALLOWED!\n", p.Dir, p.Name, pkg) - fail = true - } - } - } - if fail { - os.Exit(1) - } -} diff --git a/test/goaway_test.go b/test/goaway_test.go index 6ef11e26419d..c44bb831b70b 100644 --- a/test/goaway_test.go +++ b/test/goaway_test.go @@ -20,14 +20,29 @@ package test import ( "context" + "io" "net" + "strings" "testing" "time" + "golang.org/x/net/http2" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/keepalive" - testpb "google.golang.org/grpc/test/grpc_testing" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) // TestGracefulClientOnGoAway attempts to ensure that when the server sends a @@ -49,7 +64,7 @@ func (s) TestGracefulClientOnGoAway(t *testing.T) { s := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{MaxConnectionAge: maxConnAge})) defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := 
net.Listen("tcp", "localhost:0") if err != nil { @@ -57,12 +72,12 @@ func (s) TestGracefulClientOnGoAway(t *testing.T) { } go s.Serve(lis) - cc, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("Failed to dial server: %v", err) } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) endTime := time.Now().Add(testTime) for time.Now().Before(endTime) { @@ -73,3 +88,675 @@ func (s) TestGracefulClientOnGoAway(t *testing.T) { cancel() } } + +func (s) TestDetailedGoAwayErrorOnGracefulClosePropagatesToRPCError(t *testing.T) { + rpcDoneOnClient := make(chan struct{}) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + <-rpcDoneOnClient + return status.Error(codes.Internal, "arbitrary status") + }, + } + sopts := []grpc.ServerOption{ + grpc.KeepaliveParams(keepalive.ServerParameters{ + MaxConnectionAge: time.Millisecond * 100, + MaxConnectionAgeGrace: time.Nanosecond, // ~instantaneously, but non-zero to avoid default + }), + } + if err := ss.Start(sopts); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) + } + const expectedErrorMessageSubstring = "received prior goaway: code: NO_ERROR" + _, err = stream.Recv() + close(rpcDoneOnClient) + if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) { + t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: %q", stream, err, expectedErrorMessageSubstring) + } +} + +func (s) TestDetailedGoAwayErrorOnAbruptClosePropagatesToRPCError(t *testing.T) { + grpctest.TLogger.ExpectError("Client 
received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\"") + // set the min keepalive time very low so that this test can take + // a reasonable amount of time + prev := internal.KeepaliveMinPingTime + internal.KeepaliveMinPingTime = time.Millisecond + defer func() { internal.KeepaliveMinPingTime = prev }() + + rpcDoneOnClient := make(chan struct{}) + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + <-rpcDoneOnClient + return status.Error(codes.Internal, "arbitrary status") + }, + } + sopts := []grpc.ServerOption{ + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: time.Second * 1000, /* arbitrary, large value */ + }), + } + dopts := []grpc.DialOption{ + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: time.Millisecond, /* should trigger "too many pings" error quickly */ + Timeout: time.Second * 1000, /* arbitrary, large value */ + PermitWithoutStream: false, + }), + } + if err := ss.Start(sopts, dopts...); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall = _, %v, want _, ", ss.Client, err) + } + const expectedErrorMessageSubstring = `received prior goaway: code: ENHANCE_YOUR_CALM, debug data: "too_many_pings"` + _, err = stream.Recv() + close(rpcDoneOnClient) + if err == nil || !strings.Contains(err.Error(), expectedErrorMessageSubstring) { + t.Fatalf("%v.Recv() = _, %v, want _, rpc error containing substring: |%v|", stream, err, expectedErrorMessageSubstring) + } +} + +func (s) TestClientConnCloseAfterGoAwayWithActiveStream(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testClientConnCloseAfterGoAwayWithActiveStream(t, e) + } +} + +func 
testClientConnCloseAfterGoAwayWithActiveStream(t *testing.T, e env) { + te := newTest(t, e) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if _, err := tc.FullDuplexCall(ctx); err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want _, ", tc, err) + } + done := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(done) + }() + time.Sleep(50 * time.Millisecond) + cc.Close() + timeout := time.NewTimer(time.Second) + select { + case <-done: + case <-timeout.C: + t.Fatalf("Test timed-out.") + } +} + +func (s) TestServerGoAway(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testServerGoAway(t, e) + } +} + +func testServerGoAway(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + // Finish an RPC to make sure the connection is good. + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, ", err) + } + ch := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch) + }() + // Loop until the server side GoAway signal is propagated to the client. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil && status.Code(err) != codes.DeadlineExceeded { + cancel() + break + } + cancel() + } + // A new RPC should fail. 
+ ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable && status.Code(err) != codes.Internal { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s or %s", err, codes.Unavailable, codes.Internal) + } + <-ch + awaitNewConnLogOutput() +} + +func (s) TestServerGoAwayPendingRPC(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testServerGoAwayPendingRPC(t, e) + } +} + +func testServerGoAwayPendingRPC(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + // Finish an RPC to make sure the connection is good. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + ch := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch) + }() + // Loop until the server side GoAway signal is propagated to the client. 
+ start := time.Now() + errored := false + for time.Since(start) < time.Second { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) + cancel() + if err != nil { + errored = true + break + } + } + if !errored { + t.Fatalf("GoAway never received by client") + } + respParam := []*testpb.ResponseParameters{{Size: 1}} + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: payload, + } + // The existing RPC should be still good to proceed. + if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(_) = %v, want ", stream, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + // The RPC will run until canceled. + cancel() + <-ch + awaitNewConnLogOutput() +} + +func (s) TestServerMultipleGoAwayPendingRPC(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testServerMultipleGoAwayPendingRPC(t, e) + } +} + +func testServerMultipleGoAwayPendingRPC(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithCancel(context.Background()) + stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + // Finish an RPC to make sure the 
connection is good. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + ch1 := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch1) + }() + ch2 := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch2) + }() + // Loop until the server side GoAway signal is propagated to the client. + + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + cancel() + break + } + cancel() + } + select { + case <-ch1: + t.Fatal("GracefulStop() terminated early") + case <-ch2: + t.Fatal("GracefulStop() terminated early") + default: + } + respParam := []*testpb.ResponseParameters{ + { + Size: 1, + }, + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: payload, + } + // The existing RPC should be still good to proceed. 
+ if err := stream.Send(req); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, req, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + if err := stream.CloseSend(); err != nil { + t.Fatalf("%v.CloseSend() = %v, want ", stream, err) + } + + <-ch1 + <-ch2 + cancel() + awaitNewConnLogOutput() +} + +func (s) TestConcurrentClientConnCloseAndServerGoAway(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testConcurrentClientConnCloseAndServerGoAway(t, e) + } +} + +func testConcurrentClientConnCloseAndServerGoAway(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + ch := make(chan struct{}) + // Close ClientConn and Server concurrently. 
+ go func() { + te.srv.GracefulStop() + close(ch) + }() + go func() { + cc.Close() + }() + <-ch +} + +func (s) TestConcurrentServerStopAndGoAway(t *testing.T) { + for _, e := range listTestEnv() { + if e.name == "handler-tls" { + continue + } + testConcurrentServerStopAndGoAway(t, e) + } +} + +func testConcurrentServerStopAndGoAway(t *testing.T, e env) { + te := newTest(t, e) + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + ) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + + // Finish an RPC to make sure the connection is good. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("%v.EmptyCall(_, _, _) = _, %v, want _, ", tc, err) + } + + ch := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(ch) + }() + // Loop until the server side GoAway signal is propagated to the client. + for { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + cancel() + break + } + cancel() + } + // Stop the server and close all the connections. 
+ te.srv.Stop() + respParam := []*testpb.ResponseParameters{ + { + Size: 1, + }, + } + payload, err := newPayload(testpb.PayloadType_COMPRESSABLE, int32(100)) + if err != nil { + t.Fatal(err) + } + req := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: payload, + } + sendStart := time.Now() + for { + if err := stream.Send(req); err == io.EOF { + // stream.Send should eventually send io.EOF + break + } else if err != nil { + // Send should never return a transport-level error. + t.Fatalf("stream.Send(%v) = %v; want ", req, err) + } + if time.Since(sendStart) > 2*time.Second { + t.Fatalf("stream.Send(_) did not return io.EOF after 2s") + } + time.Sleep(time.Millisecond) + } + if _, err := stream.Recv(); err == nil || err == io.EOF { + t.Fatalf("%v.Recv() = _, %v, want _, ", stream, err) + } + <-ch + awaitNewConnLogOutput() +} + +// Proxies typically send GO_AWAY followed by connection closure a minute or so later. This +// test ensures that the connection is re-created after GO_AWAY and not affected by the +// subsequent (old) connection closure. +func (s) TestGoAwayThenClose(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + lis1, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Error while listening. Err: %v", err) + } + s1 := grpc.NewServer() + defer s1.Stop() + ts := &funcServer{ + unaryCall: func(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil + }, + fullDuplexCall: func(stream testgrpc.TestService_FullDuplexCallServer) error { + if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { + t.Errorf("unexpected error from send: %v", err) + return err + } + // Wait forever. 
+ _, err := stream.Recv() + if err == nil { + t.Error("expected to never receive any message") + } + return err + }, + } + testgrpc.RegisterTestServiceServer(s1, ts) + go s1.Serve(lis1) + + conn2Established := grpcsync.NewEvent() + lis2, err := listenWithNotifyingListener("tcp", "localhost:0", conn2Established) + if err != nil { + t.Fatalf("Error while listening. Err: %v", err) + } + s2 := grpc.NewServer() + defer s2.Stop() + testgrpc.RegisterTestServiceServer(s2, ts) + + r := manual.NewBuilderWithScheme("whatever") + r.InitialState(resolver.State{Addresses: []resolver.Address{ + {Addr: lis1.Addr().String()}, + {Addr: lis2.Addr().String()}, + }}) + cc, err := grpc.DialContext(ctx, r.Scheme()+":///", grpc.WithResolvers(r), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Error creating client: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + + t.Log("Waiting for the ClientConn to enter READY state.") + awaitState(ctx, t, cc, connectivity.Ready) + + // We make a streaming RPC and do an one-message-round-trip to make sure + // it's created on connection 1. + // + // We use a long-lived RPC because it will cause GracefulStop to send + // GO_AWAY, but the connection won't get closed until the server stops and + // the client receives the error. 
+ t.Log("Creating first streaming RPC to server 1.") + stream, err := client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("FullDuplexCall(_) = _, %v; want _, nil", err) + } + if _, err = stream.Recv(); err != nil { + t.Fatalf("unexpected error from first recv: %v", err) + } + + go s2.Serve(lis2) + + t.Log("Gracefully stopping server 1.") + go s1.GracefulStop() + + t.Log("Waiting for the ClientConn to enter IDLE state.") + awaitState(ctx, t, cc, connectivity.Idle) + + t.Log("Performing another RPC to create a connection to server 2.") + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) + } + + t.Log("Waiting for a connection to server 2.") + select { + case <-conn2Established.Done(): + case <-ctx.Done(): + t.Fatalf("timed out waiting for connection 2 to be established") + } + + // Close the listener for server2 to prevent it from allowing new connections. + lis2.Close() + + t.Log("Hard closing connection 1.") + s1.Stop() + + t.Log("Waiting for the first stream to error.") + if _, err = stream.Recv(); err == nil { + t.Fatal("expected the stream to die, but got a successful Recv") + } + + t.Log("Ensuring connection 2 is stable.") + for i := 0; i < 10; i++ { + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + t.Fatalf("UnaryCall(_) = _, %v; want _, nil", err) + } + } +} + +// TestGoAwayStreamIDSmallerThanCreatedStreams tests the scenario where a server +// sends a goaway with a stream id that is smaller than some created streams on +// the client, while the client is simultaneously creating new streams. This +// should not induce a deadlock. 
+func (s) TestGoAwayStreamIDSmallerThanCreatedStreams(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("error listening: %v", err) + } + + ctCh := testutils.NewChannel() + go func() { + conn, err := lis.Accept() + if err != nil { + t.Errorf("error in lis.Accept(): %v", err) + } + ct := newClientTester(t, conn) + ctCh.Send(ct) + }() + + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("error dialing: %v", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + val, err := ctCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout waiting for client transport (should be given after http2 creation)") + } + ct := val.(*clientTester) + + tc := testgrpc.NewTestServiceClient(cc) + someStreamsCreated := grpcsync.NewEvent() + goAwayWritten := grpcsync.NewEvent() + go func() { + for i := 0; i < 20; i++ { + if i == 10 { + <-goAwayWritten.Done() + } + tc.FullDuplexCall(ctx) + if i == 4 { + someStreamsCreated.Fire() + } + } + }() + + <-someStreamsCreated.Done() + ct.writeGoAway(1, http2.ErrCodeNo, []byte{}) + goAwayWritten.Fire() +} + +// TestTwoGoAwayPingFrames tests the scenario where you get two go away ping +// frames from the client during graceful shutdown. This should not crash the +// server. 
+func (s) TestTwoGoAwayPingFrames(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + defer lis.Close() + s := grpc.NewServer() + defer s.Stop() + go s.Serve(lis) + + conn, err := net.DialTimeout("tcp", lis.Addr().String(), defaultTestTimeout) + if err != nil { + t.Fatalf("Failed to dial: %v", err) + } + + st := newServerTesterFromConn(t, conn) + st.greet() + pingReceivedClientSide := testutils.NewChannel() + go func() { + for { + f, err := st.readFrame() + if err != nil { + return + } + switch f.(type) { + case *http2.GoAwayFrame: + case *http2.PingFrame: + pingReceivedClientSide.Send(nil) + default: + t.Errorf("server tester received unexpected frame type %T", f) + } + } + }() + gsDone := testutils.NewChannel() + go func() { + s.GracefulStop() + gsDone.Send(nil) + }() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := pingReceivedClientSide.Receive(ctx); err != nil { + t.Fatalf("Error waiting for ping frame client side from graceful shutdown: %v", err) + } + // Write two goaway pings here. + st.writePing(true, [8]byte{1, 6, 1, 8, 0, 3, 3, 9}) + st.writePing(true, [8]byte{1, 6, 1, 8, 0, 3, 3, 9}) + // Close the conn to finish up the Graceful Shutdown process. 
+ conn.Close() + if _, err := gsDone.Receive(ctx); err != nil { + t.Fatalf("Error waiting for graceful shutdown of the server: %v", err) + } +} diff --git a/test/gracefulstop_test.go b/test/gracefulstop_test.go index 6058fb8b333c..51bb132e4944 100644 --- a/test/gracefulstop_test.go +++ b/test/gracefulstop_test.go @@ -26,11 +26,15 @@ import ( "testing" "time" + "golang.org/x/net/http2" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type delayListener struct { @@ -109,7 +113,7 @@ func (s) TestGracefulStop(t *testing.T) { d := func(ctx context.Context, _ string) (net.Conn, error) { return dlis.Dial(ctx) } ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { _, err := stream.Recv() if err != nil { return err @@ -118,7 +122,7 @@ func (s) TestGracefulStop(t *testing.T) { }, } s := grpc.NewServer() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) // 1. Start Server wg := sync.WaitGroup{} @@ -146,11 +150,11 @@ func (s) TestGracefulStop(t *testing.T) { // even though GracefulStop has closed the listener. ctx, dialCancel := context.WithTimeout(context.Background(), 5*time.Second) defer dialCancel() - cc, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(d)) + cc, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(d)) if err != nil { t.Fatalf("grpc.DialContext(_, %q, _) = %v", lis.Addr().String(), err) } - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) defer cc.Close() // 4. 
Send an RPC on the new connection. @@ -163,3 +167,53 @@ func (s) TestGracefulStop(t *testing.T) { cancel() wg.Wait() } + +func (s) TestGracefulStopClosesConnAfterLastStream(t *testing.T) { + // This test ensures that a server closes the connections to its clients + // when the final stream has completed after a GOAWAY. + + handlerCalled := make(chan struct{}) + gracefulStopCalled := make(chan struct{}) + + ts := &funcServer{streamingInputCall: func(stream testgrpc.TestService_StreamingInputCallServer) error { + close(handlerCalled) // Initiate call to GracefulStop. + <-gracefulStopCalled // Wait for GOAWAYs to be received by the client. + return nil + }} + + te := newTest(t, tcpClearEnv) + te.startServer(ts) + defer te.tearDown() + + te.withServerTester(func(st *serverTester) { + st.writeHeadersGRPC(1, "/grpc.testing.TestService/StreamingInputCall", false) + + <-handlerCalled // Wait for the server to invoke its handler. + + // Gracefully stop the server. + gracefulStopDone := make(chan struct{}) + go func() { + te.srv.GracefulStop() + close(gracefulStopDone) + }() + st.wantGoAway(http2.ErrCodeNo) // Server sends a GOAWAY due to GracefulStop. + pf := st.wantPing() // Server sends a ping to verify client receipt. + st.writePing(true, pf.Data) // Send ping ack to confirm. + st.wantGoAway(http2.ErrCodeNo) // Wait for subsequent GOAWAY to indicate no new stream processing. + + close(gracefulStopCalled) // Unblock server handler. + + fr := st.wantAnyFrame() // Wait for trailer. + hdr, ok := fr.(*http2.MetaHeadersFrame) + if !ok { + t.Fatalf("Received unexpected frame of type (%T) from server: %v; want HEADERS", fr, fr) + } + if !hdr.StreamEnded() { + t.Fatalf("Received unexpected HEADERS frame from server: %v; want END_STREAM set", fr) + } + + st.wantRSTStream(http2.ErrCodeNo) // Server should send RST_STREAM because client did not half-close. + + <-gracefulStopDone // Wait for GracefulStop to return. 
+ }) +} diff --git a/test/grpc_testing/test.pb.go b/test/grpc_testing/test.pb.go deleted file mode 100644 index 89ebb3420f24..000000000000 --- a/test/grpc_testing/test.pb.go +++ /dev/null @@ -1,929 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// An integration test service that covers all the method signature permutations -// of unary/streaming requests/responses. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: test/grpc_testing/test.proto - -package grpc_testing - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The type of payload that should be returned. -type PayloadType int32 - -const ( - // Compressable text format. - PayloadType_COMPRESSABLE PayloadType = 0 - // Uncompressable binary format. 
- PayloadType_UNCOMPRESSABLE PayloadType = 1 - // Randomly chosen from all other formats defined in this enum. - PayloadType_RANDOM PayloadType = 2 -) - -// Enum value maps for PayloadType. -var ( - PayloadType_name = map[int32]string{ - 0: "COMPRESSABLE", - 1: "UNCOMPRESSABLE", - 2: "RANDOM", - } - PayloadType_value = map[string]int32{ - "COMPRESSABLE": 0, - "UNCOMPRESSABLE": 1, - "RANDOM": 2, - } -) - -func (x PayloadType) Enum() *PayloadType { - p := new(PayloadType) - *p = x - return p -} - -func (x PayloadType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (PayloadType) Descriptor() protoreflect.EnumDescriptor { - return file_test_grpc_testing_test_proto_enumTypes[0].Descriptor() -} - -func (PayloadType) Type() protoreflect.EnumType { - return &file_test_grpc_testing_test_proto_enumTypes[0] -} - -func (x PayloadType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use PayloadType.Descriptor instead. -func (PayloadType) EnumDescriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{0} -} - -type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Empty) Reset() { - *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Empty) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Empty) ProtoMessage() {} - -func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
-func (*Empty) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{0} -} - -// A block of data, to simply increase gRPC message size. -type Payload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The type of data in body. - Type PayloadType `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.testing.PayloadType" json:"type,omitempty"` - // Primary contents of payload. - Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` -} - -func (x *Payload) Reset() { - *x = Payload{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Payload) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Payload) ProtoMessage() {} - -func (x *Payload) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Payload.ProtoReflect.Descriptor instead. -func (*Payload) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{1} -} - -func (x *Payload) GetType() PayloadType { - if x != nil { - return x.Type - } - return PayloadType_COMPRESSABLE -} - -func (x *Payload) GetBody() []byte { - if x != nil { - return x.Body - } - return nil -} - -// Unary request. -type SimpleRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. 
- ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize,proto3" json:"response_size,omitempty"` - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` - // Whether SimpleResponse should include username. - FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername,proto3" json:"fill_username,omitempty"` - // Whether SimpleResponse should include OAuth scope. - FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope,proto3" json:"fill_oauth_scope,omitempty"` -} - -func (x *SimpleRequest) Reset() { - *x = SimpleRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SimpleRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SimpleRequest) ProtoMessage() {} - -func (x *SimpleRequest) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SimpleRequest.ProtoReflect.Descriptor instead. 
-func (*SimpleRequest) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{2} -} - -func (x *SimpleRequest) GetResponseType() PayloadType { - if x != nil { - return x.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (x *SimpleRequest) GetResponseSize() int32 { - if x != nil { - return x.ResponseSize - } - return 0 -} - -func (x *SimpleRequest) GetPayload() *Payload { - if x != nil { - return x.Payload - } - return nil -} - -func (x *SimpleRequest) GetFillUsername() bool { - if x != nil { - return x.FillUsername - } - return false -} - -func (x *SimpleRequest) GetFillOauthScope() bool { - if x != nil { - return x.FillOauthScope - } - return false -} - -// Unary response, as configured by the request. -type SimpleResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Payload to increase message size. - Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` - // The user the request came from, for verifying authentication was - // successful when the client expected it. - Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` - // OAuth scope. 
- OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope,proto3" json:"oauth_scope,omitempty"` -} - -func (x *SimpleResponse) Reset() { - *x = SimpleResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SimpleResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SimpleResponse) ProtoMessage() {} - -func (x *SimpleResponse) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SimpleResponse.ProtoReflect.Descriptor instead. -func (*SimpleResponse) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{3} -} - -func (x *SimpleResponse) GetPayload() *Payload { - if x != nil { - return x.Payload - } - return nil -} - -func (x *SimpleResponse) GetUsername() string { - if x != nil { - return x.Username - } - return "" -} - -func (x *SimpleResponse) GetOauthScope() string { - if x != nil { - return x.OauthScope - } - return "" -} - -// Client-streaming request. -type StreamingInputCallRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Optional input payload sent along with the request. 
- Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` -} - -func (x *StreamingInputCallRequest) Reset() { - *x = StreamingInputCallRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingInputCallRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingInputCallRequest) ProtoMessage() {} - -func (x *StreamingInputCallRequest) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingInputCallRequest.ProtoReflect.Descriptor instead. -func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{4} -} - -func (x *StreamingInputCallRequest) GetPayload() *Payload { - if x != nil { - return x.Payload - } - return nil -} - -// Client-streaming response. -type StreamingInputCallResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Aggregated size of payloads received from the client. 
- AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize,proto3" json:"aggregated_payload_size,omitempty"` -} - -func (x *StreamingInputCallResponse) Reset() { - *x = StreamingInputCallResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingInputCallResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingInputCallResponse) ProtoMessage() {} - -func (x *StreamingInputCallResponse) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingInputCallResponse.ProtoReflect.Descriptor instead. -func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{5} -} - -func (x *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { - if x != nil { - return x.AggregatedPayloadSize - } - return 0 -} - -// Configuration for a particular response. -type ResponseParameters struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - Size int32 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` - // Desired interval between consecutive responses in the response stream in - // microseconds. 
- IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs,proto3" json:"interval_us,omitempty"` -} - -func (x *ResponseParameters) Reset() { - *x = ResponseParameters{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResponseParameters) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResponseParameters) ProtoMessage() {} - -func (x *ResponseParameters) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResponseParameters.ProtoReflect.Descriptor instead. -func (*ResponseParameters) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{6} -} - -func (x *ResponseParameters) GetSize() int32 { - if x != nil { - return x.Size - } - return 0 -} - -func (x *ResponseParameters) GetIntervalUs() int32 { - if x != nil { - return x.IntervalUs - } - return 0 -} - -// Server-streaming request. -type StreamingOutputCallRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,proto3,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Configuration for each expected response message. 
- ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters,proto3" json:"response_parameters,omitempty"` - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` -} - -func (x *StreamingOutputCallRequest) Reset() { - *x = StreamingOutputCallRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingOutputCallRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingOutputCallRequest) ProtoMessage() {} - -func (x *StreamingOutputCallRequest) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingOutputCallRequest.ProtoReflect.Descriptor instead. -func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{7} -} - -func (x *StreamingOutputCallRequest) GetResponseType() PayloadType { - if x != nil { - return x.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (x *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { - if x != nil { - return x.ResponseParameters - } - return nil -} - -func (x *StreamingOutputCallRequest) GetPayload() *Payload { - if x != nil { - return x.Payload - } - return nil -} - -// Server-streaming response, as configured by the request and parameters. -type StreamingOutputCallResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Payload to increase response size. 
- Payload *Payload `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` -} - -func (x *StreamingOutputCallResponse) Reset() { - *x = StreamingOutputCallResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_test_grpc_testing_test_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StreamingOutputCallResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StreamingOutputCallResponse) ProtoMessage() {} - -func (x *StreamingOutputCallResponse) ProtoReflect() protoreflect.Message { - mi := &file_test_grpc_testing_test_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StreamingOutputCallResponse.ProtoReflect.Descriptor instead. -func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { - return file_test_grpc_testing_test_proto_rawDescGZIP(), []int{8} -} - -func (x *StreamingOutputCallResponse) GetPayload() *Payload { - if x != nil { - return x.Payload - } - return nil -} - -var File_test_grpc_testing_test_proto protoreflect.FileDescriptor - -var file_test_grpc_testing_test_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x74, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x67, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x22, 0x07, 0x0a, 0x05, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x4c, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x12, 0x2d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 
0x70, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, - 0x6f, 0x64, 0x79, 0x22, 0xf4, 0x01, 0x0a, 0x0d, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x66, - 0x69, 0x6c, 0x6c, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0c, 0x66, 0x69, 0x6c, 0x6c, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x6c, - 0x4f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x22, 0x7e, 0x0a, 0x0e, 0x53, 0x69, - 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 
0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x22, 0x4c, 0x0a, 0x19, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x54, 0x0a, 0x1a, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x49, - 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x55, 0x73, 0x22, 0xe0, 0x01, 0x0a, 0x1a, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 
0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x19, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x4e, 0x0a, 0x1b, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, - 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2a, 0x3f, 0x0a, 0x0b, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x43, - 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x12, 
0x0a, - 0x0e, 0x55, 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x52, 0x45, 0x53, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x10, - 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x02, 0x32, 0xbb, 0x04, - 0x0a, 0x0b, 0x54, 0x65, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x35, 0x0a, - 0x09, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x13, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x13, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x09, 0x55, 0x6e, 0x61, 0x72, 0x79, 0x43, 0x61, 0x6c, - 0x6c, 0x12, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x69, - 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x13, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, - 0x61, 0x6c, 0x6c, 0x12, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x69, 0x0a, 0x12, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, - 0x12, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, - 0x53, 0x74, 0x72, 0x65, 0x61, 
0x6d, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, - 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, - 0x6e, 0x67, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x28, 0x01, 0x12, 0x69, 0x0a, 0x0e, 0x46, 0x75, 0x6c, 0x6c, 0x44, 0x75, 0x70, - 0x6c, 0x65, 0x78, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, - 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, - 0x12, 0x69, 0x0a, 0x0e, 0x48, 0x61, 0x6c, 0x66, 0x44, 0x75, 0x70, 0x6c, 0x65, 0x78, 0x43, 0x61, - 0x6c, 0x6c, 0x12, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x2e, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x43, 0x61, 0x6c, 0x6c, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x2a, 0x5a, 0x28, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_test_grpc_testing_test_proto_rawDescOnce sync.Once - file_test_grpc_testing_test_proto_rawDescData = file_test_grpc_testing_test_proto_rawDesc -) - -func file_test_grpc_testing_test_proto_rawDescGZIP() []byte { - file_test_grpc_testing_test_proto_rawDescOnce.Do(func() { - file_test_grpc_testing_test_proto_rawDescData = protoimpl.X.CompressGZIP(file_test_grpc_testing_test_proto_rawDescData) - }) - return file_test_grpc_testing_test_proto_rawDescData -} - -var file_test_grpc_testing_test_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_test_grpc_testing_test_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_test_grpc_testing_test_proto_goTypes = []interface{}{ - (PayloadType)(0), // 0: grpc.testing.PayloadType - (*Empty)(nil), // 1: grpc.testing.Empty - (*Payload)(nil), // 2: grpc.testing.Payload - (*SimpleRequest)(nil), // 3: grpc.testing.SimpleRequest - (*SimpleResponse)(nil), // 4: grpc.testing.SimpleResponse - (*StreamingInputCallRequest)(nil), // 5: grpc.testing.StreamingInputCallRequest - (*StreamingInputCallResponse)(nil), // 6: grpc.testing.StreamingInputCallResponse - (*ResponseParameters)(nil), // 7: grpc.testing.ResponseParameters - (*StreamingOutputCallRequest)(nil), // 8: grpc.testing.StreamingOutputCallRequest - (*StreamingOutputCallResponse)(nil), // 9: grpc.testing.StreamingOutputCallResponse -} -var file_test_grpc_testing_test_proto_depIdxs = []int32{ - 0, // 0: grpc.testing.Payload.type:type_name -> grpc.testing.PayloadType - 0, // 1: grpc.testing.SimpleRequest.response_type:type_name -> grpc.testing.PayloadType - 2, // 2: grpc.testing.SimpleRequest.payload:type_name -> grpc.testing.Payload - 2, // 3: grpc.testing.SimpleResponse.payload:type_name -> grpc.testing.Payload - 2, // 4: grpc.testing.StreamingInputCallRequest.payload:type_name -> grpc.testing.Payload - 0, // 5: grpc.testing.StreamingOutputCallRequest.response_type:type_name -> grpc.testing.PayloadType - 7, // 6: 
grpc.testing.StreamingOutputCallRequest.response_parameters:type_name -> grpc.testing.ResponseParameters - 2, // 7: grpc.testing.StreamingOutputCallRequest.payload:type_name -> grpc.testing.Payload - 2, // 8: grpc.testing.StreamingOutputCallResponse.payload:type_name -> grpc.testing.Payload - 1, // 9: grpc.testing.TestService.EmptyCall:input_type -> grpc.testing.Empty - 3, // 10: grpc.testing.TestService.UnaryCall:input_type -> grpc.testing.SimpleRequest - 8, // 11: grpc.testing.TestService.StreamingOutputCall:input_type -> grpc.testing.StreamingOutputCallRequest - 5, // 12: grpc.testing.TestService.StreamingInputCall:input_type -> grpc.testing.StreamingInputCallRequest - 8, // 13: grpc.testing.TestService.FullDuplexCall:input_type -> grpc.testing.StreamingOutputCallRequest - 8, // 14: grpc.testing.TestService.HalfDuplexCall:input_type -> grpc.testing.StreamingOutputCallRequest - 1, // 15: grpc.testing.TestService.EmptyCall:output_type -> grpc.testing.Empty - 4, // 16: grpc.testing.TestService.UnaryCall:output_type -> grpc.testing.SimpleResponse - 9, // 17: grpc.testing.TestService.StreamingOutputCall:output_type -> grpc.testing.StreamingOutputCallResponse - 6, // 18: grpc.testing.TestService.StreamingInputCall:output_type -> grpc.testing.StreamingInputCallResponse - 9, // 19: grpc.testing.TestService.FullDuplexCall:output_type -> grpc.testing.StreamingOutputCallResponse - 9, // 20: grpc.testing.TestService.HalfDuplexCall:output_type -> grpc.testing.StreamingOutputCallResponse - 15, // [15:21] is the sub-list for method output_type - 9, // [9:15] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name -} - -func init() { file_test_grpc_testing_test_proto_init() } -func file_test_grpc_testing_test_proto_init() { - if File_test_grpc_testing_test_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - 
file_test_grpc_testing_test_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Payload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SimpleRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SimpleResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamingInputCallRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamingInputCallResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseParameters); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*StreamingOutputCallRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_test_grpc_testing_test_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamingOutputCallResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_test_grpc_testing_test_proto_rawDesc, - NumEnums: 1, - NumMessages: 9, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_test_grpc_testing_test_proto_goTypes, - DependencyIndexes: file_test_grpc_testing_test_proto_depIdxs, - EnumInfos: file_test_grpc_testing_test_proto_enumTypes, - MessageInfos: file_test_grpc_testing_test_proto_msgTypes, - }.Build() - File_test_grpc_testing_test_proto = out.File - file_test_grpc_testing_test_proto_rawDesc = nil - file_test_grpc_testing_test_proto_goTypes = nil - file_test_grpc_testing_test_proto_depIdxs = nil -} diff --git a/test/grpc_testing/test.proto b/test/grpc_testing/test.proto deleted file mode 100644 index 0c6650401d59..000000000000 --- a/test/grpc_testing/test.proto +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// An integration test service that covers all the method signature permutations -// of unary/streaming requests/responses. -syntax = "proto3"; - -option go_package = "google.golang.org/grpc/test/grpc_testing"; - -package grpc.testing; - -message Empty {} - -// The type of payload that should be returned. -enum PayloadType { - // Compressable text format. - COMPRESSABLE = 0; - - // Uncompressable binary format. - UNCOMPRESSABLE = 1; - - // Randomly chosen from all other formats defined in this enum. - RANDOM = 2; -} - -// A block of data, to simply increase gRPC message size. -message Payload { - // The type of data in body. - PayloadType type = 1; - // Primary contents of payload. - bytes body = 2; -} - -// Unary request. -message SimpleRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - PayloadType response_type = 1; - - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - int32 response_size = 2; - - // Optional input payload sent along with the request. - Payload payload = 3; - - // Whether SimpleResponse should include username. - bool fill_username = 4; - - // Whether SimpleResponse should include OAuth scope. - bool fill_oauth_scope = 5; -} - -// Unary response, as configured by the request. -message SimpleResponse { - // Payload to increase message size. - Payload payload = 1; - - // The user the request came from, for verifying authentication was - // successful when the client expected it. - string username = 2; - - // OAuth scope. - string oauth_scope = 3; -} - -// Client-streaming request. -message StreamingInputCallRequest { - // Optional input payload sent along with the request. - Payload payload = 1; - - // Not expecting any payload from the response. -} - -// Client-streaming response. 
-message StreamingInputCallResponse { - // Aggregated size of payloads received from the client. - int32 aggregated_payload_size = 1; -} - -// Configuration for a particular response. -message ResponseParameters { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - int32 size = 1; - - // Desired interval between consecutive responses in the response stream in - // microseconds. - int32 interval_us = 2; -} - -// Server-streaming request. -message StreamingOutputCallRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - PayloadType response_type = 1; - - // Configuration for each expected response message. - repeated ResponseParameters response_parameters = 2; - - // Optional input payload sent along with the request. - Payload payload = 3; -} - -// Server-streaming response, as configured by the request and parameters. -message StreamingOutputCallResponse { - // Payload to increase response size. - Payload payload = 1; -} - -// A simple service to test the various types of RPCs and experiment with -// performance with various types of payload. -service TestService { - // One empty request followed by one empty response. - rpc EmptyCall(Empty) returns (Empty); - - // One request followed by one response. - // The server returns the client payload as-is. - rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - rpc StreamingOutputCall(StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by one response (streamed upload). 
- // The server returns the aggregated size of client payload as the result. - rpc StreamingInputCall(stream StreamingInputCallRequest) - returns (StreamingInputCallResponse); - - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - rpc FullDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - rpc HalfDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); -} diff --git a/test/grpc_testing/test_grpc.pb.go b/test/grpc_testing/test_grpc.pb.go deleted file mode 100644 index ab3b68a92bcc..000000000000 --- a/test/grpc_testing/test_grpc.pb.go +++ /dev/null @@ -1,435 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. - -package grpc_testing - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// TestServiceClient is the client API for TestService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type TestServiceClient interface { - // One empty request followed by one empty response. - EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) - // One request followed by one response. - // The server returns the client payload as-is. 
- UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) -} - -type testServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { - return &testServiceClient{cc} -} - -func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { - out := new(SimpleResponse) - err := c.cc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[0], "/grpc.testing.TestService/StreamingOutputCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceStreamingOutputCallClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type TestService_StreamingOutputCallClient interface { - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceStreamingOutputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[1], "/grpc.testing.TestService/StreamingInputCall", opts...) 
- if err != nil { - return nil, err - } - x := &testServiceStreamingInputCallClient{stream} - return x, nil -} - -type TestService_StreamingInputCallClient interface { - Send(*StreamingInputCallRequest) error - CloseAndRecv() (*StreamingInputCallResponse, error) - grpc.ClientStream -} - -type testServiceStreamingInputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(StreamingInputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[2], "/grpc.testing.TestService/FullDuplexCall", opts...) 
- if err != nil { - return nil, err - } - x := &testServiceFullDuplexCallClient{stream} - return x, nil -} - -type TestService_FullDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceFullDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { - stream, err := c.cc.NewStream(ctx, &TestService_ServiceDesc.Streams[3], "/grpc.testing.TestService/HalfDuplexCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceHalfDuplexCallClient{stream} - return x, nil -} - -type TestService_HalfDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceHalfDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// TestServiceServer is the server API for TestService service. -// All implementations must embed UnimplementedTestServiceServer -// for forward compatibility -type TestServiceServer interface { - // One empty request followed by one empty response. 
- EmptyCall(context.Context, *Empty) (*Empty, error) - // One request followed by one response. - // The server returns the client payload as-is. - UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - StreamingInputCall(TestService_StreamingInputCallServer) error - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(TestService_FullDuplexCallServer) error - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - HalfDuplexCall(TestService_HalfDuplexCallServer) error - mustEmbedUnimplementedTestServiceServer() -} - -// UnimplementedTestServiceServer must be embedded to have forward compatible implementations. 
-type UnimplementedTestServiceServer struct { -} - -func (UnimplementedTestServiceServer) EmptyCall(context.Context, *Empty) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method EmptyCall not implemented") -} -func (UnimplementedTestServiceServer) UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UnaryCall not implemented") -} -func (UnimplementedTestServiceServer) StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error { - return status.Errorf(codes.Unimplemented, "method StreamingOutputCall not implemented") -} -func (UnimplementedTestServiceServer) StreamingInputCall(TestService_StreamingInputCallServer) error { - return status.Errorf(codes.Unimplemented, "method StreamingInputCall not implemented") -} -func (UnimplementedTestServiceServer) FullDuplexCall(TestService_FullDuplexCallServer) error { - return status.Errorf(codes.Unimplemented, "method FullDuplexCall not implemented") -} -func (UnimplementedTestServiceServer) HalfDuplexCall(TestService_HalfDuplexCallServer) error { - return status.Errorf(codes.Unimplemented, "method HalfDuplexCall not implemented") -} -func (UnimplementedTestServiceServer) mustEmbedUnimplementedTestServiceServer() {} - -// UnsafeTestServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to TestServiceServer will -// result in compilation errors. 
-type UnsafeTestServiceServer interface { - mustEmbedUnimplementedTestServiceServer() -} - -func RegisterTestServiceServer(s grpc.ServiceRegistrar, srv TestServiceServer) { - s.RegisterService(&TestService_ServiceDesc, srv) -} - -func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).EmptyCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.TestService/EmptyCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SimpleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).UnaryCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.TestService/UnaryCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StreamingOutputCallRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) -} - -type TestService_StreamingOutputCallServer interface { - Send(*StreamingOutputCallResponse) error - grpc.ServerStream -} - -type testServiceStreamingOutputCallServer struct { - grpc.ServerStream -} 
- -func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) -} - -type TestService_StreamingInputCallServer interface { - SendAndClose(*StreamingInputCallResponse) error - Recv() (*StreamingInputCallRequest, error) - grpc.ServerStream -} - -type testServiceStreamingInputCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { - m := new(StreamingInputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) -} - -type TestService_FullDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceFullDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) -} - -type TestService_HalfDuplexCallServer interface { - 
Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceHalfDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// TestService_ServiceDesc is the grpc.ServiceDesc for TestService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var TestService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.TestService", - HandlerType: (*TestServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "EmptyCall", - Handler: _TestService_EmptyCall_Handler, - }, - { - MethodName: "UnaryCall", - Handler: _TestService_UnaryCall_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamingOutputCall", - Handler: _TestService_StreamingOutputCall_Handler, - ServerStreams: true, - }, - { - StreamName: "StreamingInputCall", - Handler: _TestService_StreamingInputCall_Handler, - ClientStreams: true, - }, - { - StreamName: "FullDuplexCall", - Handler: _TestService_FullDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "HalfDuplexCall", - Handler: _TestService_HalfDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "test/grpc_testing/test.proto", -} diff --git a/test/healthcheck_test.go b/test/healthcheck_test.go index 99f7d8951ebd..a6865b803026 100644 --- a/test/healthcheck_test.go +++ b/test/healthcheck_test.go @@ -30,16 +30,19 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - _ "google.golang.org/grpc/health" - healthgrpc 
"google.golang.org/grpc/health/grpc_health_v1" - healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/health" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/resolver" "google.golang.org/grpc/resolver/manual" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + healthgrpc "google.golang.org/grpc/health/grpc_health_v1" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) var testHealthCheckFunc = internal.HealthCheckFunc @@ -48,7 +51,7 @@ func newTestHealthServer() *testHealthServer { return newTestHealthServerWithWatchFunc(defaultWatchFunc) } -func newTestHealthServerWithWatchFunc(f func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error) *testHealthServer { +func newTestHealthServerWithWatchFunc(f healthWatchFunc) *testHealthServer { return &testHealthServer{ watchFunc: f, update: make(chan struct{}, 1), @@ -82,9 +85,11 @@ func defaultWatchFunc(s *testHealthServer, in *healthpb.HealthCheckRequest, stre return nil } +type healthWatchFunc func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error + type testHealthServer struct { - healthpb.UnimplementedHealthServer - watchFunc func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error + healthgrpc.UnimplementedHealthServer + watchFunc healthWatchFunc mu sync.Mutex status map[string]healthpb.HealthCheckResponse_ServingStatus update chan struct{} @@ -124,25 +129,26 @@ func setupHealthCheckWrapper() (hcEnterChan chan struct{}, hcExitChan chan struc return } -type svrConfig struct { - specialWatchFunc func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream 
healthgrpc.Health_WatchServer) error -} +func setupServer(t *testing.T, watchFunc healthWatchFunc) (*grpc.Server, net.Listener, *testHealthServer) { + t.Helper() -func setupServer(sc *svrConfig) (s *grpc.Server, lis net.Listener, ts *testHealthServer, deferFunc func(), err error) { - s = grpc.NewServer() - lis, err = net.Listen("tcp", "localhost:0") + lis, err := net.Listen("tcp", "localhost:0") if err != nil { - return nil, nil, nil, func() {}, fmt.Errorf("failed to listen due to err %v", err) + t.Fatalf("net.Listen() failed: %v", err) } - if sc.specialWatchFunc != nil { - ts = newTestHealthServerWithWatchFunc(sc.specialWatchFunc) + + var ts *testHealthServer + if watchFunc != nil { + ts = newTestHealthServerWithWatchFunc(watchFunc) } else { ts = newTestHealthServer() } + s := grpc.NewServer() healthgrpc.RegisterHealthServer(s, ts) - testpb.RegisterTestServiceServer(s, &testServer{}) + testgrpc.RegisterTestServiceServer(s, &testServer{}) go s.Serve(lis) - return s, lis, ts, s.Stop, nil + t.Cleanup(func() { s.Stop() }) + return s, lis, ts } type clientConfig struct { @@ -151,28 +157,34 @@ type clientConfig struct { extraDialOption []grpc.DialOption } -func setupClient(c *clientConfig) (cc *grpc.ClientConn, r *manual.Resolver, deferFunc func(), err error) { - r = manual.NewBuilderWithScheme("whatever") - var opts []grpc.DialOption - opts = append(opts, grpc.WithInsecure(), grpc.WithResolvers(r), grpc.WithBalancerName(c.balancerName)) - if c.testHealthCheckFuncWrapper != nil { - opts = append(opts, internal.WithHealthCheckFunc.(func(internal.HealthChecker) grpc.DialOption)(c.testHealthCheckFuncWrapper)) +func setupClient(t *testing.T, c *clientConfig) (*grpc.ClientConn, *manual.Resolver) { + t.Helper() + + r := manual.NewBuilderWithScheme("whatever") + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + } + if c != nil { + if c.balancerName != "" { + opts = append(opts, 
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingConfig": [{"%s":{}}]}`, c.balancerName))) + } + if c.testHealthCheckFuncWrapper != nil { + opts = append(opts, internal.WithHealthCheckFunc.(func(internal.HealthChecker) grpc.DialOption)(c.testHealthCheckFuncWrapper)) + } + opts = append(opts, c.extraDialOption...) } - opts = append(opts, c.extraDialOption...) - cc, err = grpc.Dial(r.Scheme()+":///test.server", opts...) - if err != nil { - return nil, nil, nil, fmt.Errorf("dial failed due to err: %v", err) + cc, err := grpc.Dial(r.Scheme()+":///test.server", opts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) } - return cc, r, func() { cc.Close() }, nil + t.Cleanup(func() { cc.Close() }) + return cc, r } func (s) TestHealthCheckWatchStateChange(t *testing.T) { - _, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } + _, lis, ts := setupServer(t, nil) // The table below shows the expected series of addrConn connectivity transitions when server // updates its health status. 
As there's only one addrConn corresponds with the ClientConn in this @@ -188,59 +200,45 @@ func (s) TestHealthCheckWatchStateChange(t *testing.T) { //+------------------------------+-------------------------------------------+ ts.SetServingStatus("foo", healthpb.HealthCheckResponse_NOT_SERVING) - cc, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"}) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - + cc, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if ok := cc.WaitForStateChange(ctx, connectivity.Idle); !ok { - t.Fatal("ClientConn is still in IDLE state when the context times out.") - } - if ok := cc.WaitForStateChange(ctx, connectivity.Connecting); !ok { - t.Fatal("ClientConn is still in CONNECTING state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.Idle) + awaitNotState(ctx, t, cc, connectivity.Connecting) + awaitState(ctx, t, cc, connectivity.TransientFailure) if s := cc.GetState(); s != connectivity.TransientFailure { t.Fatalf("ClientConn is in %v state, want TRANSIENT FAILURE", s) } ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - if ok := cc.WaitForStateChange(ctx, connectivity.TransientFailure); !ok { - t.Fatal("ClientConn is still in TRANSIENT FAILURE state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.TransientFailure) if s := cc.GetState(); s != connectivity.Ready { t.Fatalf("ClientConn is in %v state, want READY", s) } ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVICE_UNKNOWN) - if ok := cc.WaitForStateChange(ctx, 
connectivity.Ready); !ok { - t.Fatal("ClientConn is still in READY state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.Ready) if s := cc.GetState(); s != connectivity.TransientFailure { t.Fatalf("ClientConn is in %v state, want TRANSIENT FAILURE", s) } ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - if ok := cc.WaitForStateChange(ctx, connectivity.TransientFailure); !ok { - t.Fatal("ClientConn is still in TRANSIENT FAILURE state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.TransientFailure) if s := cc.GetState(); s != connectivity.Ready { t.Fatalf("ClientConn is in %v state, want READY", s) } ts.SetServingStatus("foo", healthpb.HealthCheckResponse_UNKNOWN) - if ok := cc.WaitForStateChange(ctx, connectivity.Ready); !ok { - t.Fatal("ClientConn is still in READY state when the context times out.") - } + awaitNotState(ctx, t, cc, connectivity.Ready) if s := cc.GetState(); s != connectivity.TransientFailure { t.Fatalf("ClientConn is in %v state, want TRANSIENT FAILURE", s) } @@ -257,28 +255,20 @@ func (s) TestHealthCheckHealthServerNotRegistered(t *testing.T) { go s.Serve(lis) defer s.Stop() - cc, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"}) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - + cc, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if ok := cc.WaitForStateChange(ctx, connectivity.Idle); !ok { - t.Fatal("ClientConn is still in IDLE state when the context times out.") - } - if ok := cc.WaitForStateChange(ctx, connectivity.Connecting); !ok { - t.Fatal("ClientConn is still in CONNECTING state when the context 
times out.") - } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + awaitNotState(ctx, t, cc, connectivity.Idle) + awaitNotState(ctx, t, cc, connectivity.Connecting) if s := cc.GetState(); s != connectivity.Ready { t.Fatalf("ClientConn is in %v state, want READY", s) } @@ -287,37 +277,23 @@ func (s) TestHealthCheckHealthServerNotRegistered(t *testing.T) { // In the case of a goaway received, the health check stream should be terminated and health check // function should exit. func (s) TestHealthCheckWithGoAway(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - s, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } - + s, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - - tc := testpb.NewTestServiceClient(cc) + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - // make some rpcs to make sure connection is working. 
if err := verifyResultWithDelay(func() (bool, error) { if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); err != nil { @@ -379,33 +355,19 @@ func (s) TestHealthCheckWithGoAway(t *testing.T) { } func (s) TestHealthCheckWithConnClose(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - s, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } - + s, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - - tc := testpb.NewTestServiceClient(cc) - + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -443,30 +405,17 @@ func (s) TestHealthCheckWithConnClose(t *testing.T) { // addrConn drain happens when addrConn gets torn down due to its address being no longer in the // address list returned by the resolver. 
func (s) TestHealthCheckWithAddrConnDrain(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - _, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } - + _, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - - tc := testpb.NewTestServiceClient(cc) - sc := parseCfg(r, `{ + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + tc := testgrpc.NewTestServiceClient(cc) + sc := parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, @@ -536,32 +485,19 @@ func (s) TestHealthCheckWithAddrConnDrain(t *testing.T) { // ClientConn close will lead to its addrConns being torn down. 
func (s) TestHealthCheckWithClientConnClose(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - _, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } - + _, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - - tc := testpb.NewTestServiceClient(cc) + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -601,47 +537,34 @@ func (s) TestHealthCheckWithClientConnClose(t *testing.T) { // closes the skipReset channel(since it has not been closed inside health check func) to unblock // onGoAway/onClose goroutine. func (s) TestHealthCheckWithoutSetConnectivityStateCalledAddrConnShutDown(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - _, lis, ts, deferFunc, err := setupServer(&svrConfig{ - specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - if in.Service != "delay" { - return status.Error(codes.FailedPrecondition, - "this special Watch function only handles request with service name to be \"delay\"") - } - // Do nothing to mock a delay of health check response from server side. 
- // This case is to help with the test that covers the condition that setConnectivityState is not - // called inside HealthCheckFunc before the func returns. - select { - case <-stream.Context().Done(): - case <-time.After(5 * time.Second): - } - return nil - }, - }) - defer deferFunc() - if err != nil { - t.Fatal(err) + watchFunc := func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + if in.Service != "delay" { + return status.Error(codes.FailedPrecondition, + "this special Watch function only handles request with service name to be \"delay\"") + } + // Do nothing to mock a delay of health check response from server side. + // This case is to help with the test that covers the condition that setConnectivityState is not + // called inside HealthCheckFunc before the func returns. + select { + case <-stream.Context().Done(): + case <-time.After(5 * time.Second): + } + return nil } - + _, lis, ts := setupServer(t, watchFunc) ts.SetServingStatus("delay", healthpb.HealthCheckResponse_SERVING) - _, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + _, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) // The serviceName "delay" is specially handled at server side, where response will not be sent // back to client immediately upon receiving the request (client should receive no response until // test ends). 
- sc := parseCfg(r, `{ + sc := parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "delay" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, @@ -677,49 +600,36 @@ func (s) TestHealthCheckWithoutSetConnectivityStateCalledAddrConnShutDown(t *tes // closes the allowedToReset channel(since it has not been closed inside health check func) to unblock // onGoAway/onClose goroutine. func (s) TestHealthCheckWithoutSetConnectivityStateCalled(t *testing.T) { - hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - s, lis, ts, deferFunc, err := setupServer(&svrConfig{ - specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - if in.Service != "delay" { - return status.Error(codes.FailedPrecondition, - "this special Watch function only handles request with service name to be \"delay\"") - } - // Do nothing to mock a delay of health check response from server side. - // This case is to help with the test that covers the condition that setConnectivityState is not - // called inside HealthCheckFunc before the func returns. - select { - case <-stream.Context().Done(): - case <-time.After(5 * time.Second): - } - return nil - }, - }) - defer deferFunc() - if err != nil { - t.Fatal(err) + watchFunc := func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + if in.Service != "delay" { + return status.Error(codes.FailedPrecondition, + "this special Watch function only handles request with service name to be \"delay\"") + } + // Do nothing to mock a delay of health check response from server side. + // This case is to help with the test that covers the condition that setConnectivityState is not + // called inside HealthCheckFunc before the func returns. 
+ select { + case <-stream.Context().Done(): + case <-time.After(5 * time.Second): + } + return nil } - + s, lis, ts := setupServer(t, watchFunc) ts.SetServingStatus("delay", healthpb.HealthCheckResponse_SERVING) - _, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() + hcEnterChan, hcExitChan, testHealthCheckFuncWrapper := setupHealthCheckWrapper() + _, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) // The serviceName "delay" is specially handled at server side, where response will not be sent // back to client immediately upon receiving the request (client should receive no response until // test ends). r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "delay" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) select { @@ -749,25 +659,18 @@ func (s) TestHealthCheckWithoutSetConnectivityStateCalled(t *testing.T) { func testHealthCheckDisableWithDialOption(t *testing.T, addr string) { hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", + cc, r := setupClient(t, &clientConfig{ testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, extraDialOption: []grpc.DialOption{grpc.WithDisableHealthCheck()}, }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - - tc := testpb.NewTestServiceClient(cc) - + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) ctx, cancel := 
context.WithTimeout(context.Background(), defaultTestTimeout) @@ -791,24 +694,17 @@ func testHealthCheckDisableWithDialOption(t *testing.T, addr string) { func testHealthCheckDisableWithBalancer(t *testing.T, addr string) { hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "pick_first", + cc, r := setupClient(t, &clientConfig{ testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - - tc := testpb.NewTestServiceClient(cc) - + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: addr}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "foo" - } + }, + "loadBalancingConfig": [{"pick_first":{}}] }`)}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -832,18 +728,8 @@ func testHealthCheckDisableWithBalancer(t *testing.T, addr string) { func testHealthCheckDisableWithServiceConfig(t *testing.T, addr string) { hcEnterChan, _, testHealthCheckFuncWrapper := setupHealthCheckWrapper() - - cc, r, deferFunc, err := setupClient(&clientConfig{ - balancerName: "round_robin", - testHealthCheckFuncWrapper: testHealthCheckFuncWrapper, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - - tc := testpb.NewTestServiceClient(cc) - + cc, r := setupClient(t, &clientConfig{testHealthCheckFuncWrapper: testHealthCheckFuncWrapper}) + tc := testgrpc.NewTestServiceClient(cc) r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: addr}}}) ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) @@ -866,11 +752,7 @@ func testHealthCheckDisableWithServiceConfig(t *testing.T, addr string) { } func (s) TestHealthCheckDisable(t *testing.T) { - _, lis, ts, deferFunc, err := setupServer(&svrConfig{}) - defer deferFunc() - if err != nil { - t.Fatal(err) - } + 
_, lis, ts := setupServer(t, nil) ts.SetServingStatus("foo", healthpb.HealthCheckResponse_SERVING) // test client side disabling configuration. @@ -880,32 +762,23 @@ func (s) TestHealthCheckDisable(t *testing.T) { } func (s) TestHealthCheckChannelzCountingCallSuccess(t *testing.T) { - _, lis, _, deferFunc, err := setupServer(&svrConfig{ - specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - if in.Service != "channelzSuccess" { - return status.Error(codes.FailedPrecondition, - "this special Watch function only handles request with service name to be \"channelzSuccess\"") - } - return status.Error(codes.OK, "fake success") - }, - }) - defer deferFunc() - if err != nil { - t.Fatal(err) - } - - _, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"}) - if err != nil { - t.Fatal(err) + watchFunc := func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + if in.Service != "channelzSuccess" { + return status.Error(codes.FailedPrecondition, + "this special Watch function only handles request with service name to be \"channelzSuccess\"") + } + return status.Error(codes.OK, "fake success") } - defer deferFunc() + _, lis, _ := setupServer(t, watchFunc) + _, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "channelzSuccess" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) if err := verifyResultWithDelay(func() (bool, error) { @@ -936,32 +809,23 @@ func (s) TestHealthCheckChannelzCountingCallSuccess(t *testing.T) { } func (s) TestHealthCheckChannelzCountingCallFailure(t *testing.T) { - _, lis, _, deferFunc, err := setupServer(&svrConfig{ - specialWatchFunc: func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream 
healthgrpc.Health_WatchServer) error { - if in.Service != "channelzFailure" { - return status.Error(codes.FailedPrecondition, - "this special Watch function only handles request with service name to be \"channelzFailure\"") - } - return status.Error(codes.Internal, "fake failure") - }, - }) - if err != nil { - t.Fatal(err) - } - defer deferFunc() - - _, r, deferFunc, err := setupClient(&clientConfig{balancerName: "round_robin"}) - if err != nil { - t.Fatal(err) + watchFunc := func(s *testHealthServer, in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { + if in.Service != "channelzFailure" { + return status.Error(codes.FailedPrecondition, + "this special Watch function only handles request with service name to be \"channelzFailure\"") + } + return status.Error(codes.Internal, "fake failure") } - defer deferFunc() + _, lis, _ := setupServer(t, watchFunc) + _, r := setupClient(t, nil) r.UpdateState(resolver.State{ Addresses: []resolver.Address{{Addr: lis.Addr().String()}}, - ServiceConfig: parseCfg(r, `{ + ServiceConfig: parseServiceConfig(t, r, `{ "healthCheckConfig": { "serviceName": "channelzFailure" - } + }, + "loadBalancingConfig": [{"round_robin":{}}] }`)}) if err := verifyResultWithDelay(func() (bool, error) { @@ -990,3 +854,313 @@ func (s) TestHealthCheckChannelzCountingCallFailure(t *testing.T) { t.Fatal(err) } } + +// healthCheck is a helper function to make a unary health check RPC and return +// the response. +func healthCheck(d time.Duration, cc *grpc.ClientConn, service string) (*healthpb.HealthCheckResponse, error) { + ctx, cancel := context.WithTimeout(context.Background(), d) + defer cancel() + hc := healthgrpc.NewHealthClient(cc) + return hc.Check(ctx, &healthpb.HealthCheckRequest{Service: service}) +} + +// verifyHealthCheckStatus is a helper function to verify that the current +// health status of the service matches the one passed in 'wantStatus'. 
+func verifyHealthCheckStatus(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantStatus healthpb.HealthCheckResponse_ServingStatus) { + t.Helper() + resp, err := healthCheck(d, cc, service) + if err != nil { + t.Fatalf("Health/Check(_, _) = _, %v, want _, ", err) + } + if resp.Status != wantStatus { + t.Fatalf("Got the serving status %v, want %v", resp.Status, wantStatus) + } +} + +// verifyHealthCheckErrCode is a helper function to verify that a unary health +// check RPC returns an error with a code set to 'wantCode'. +func verifyHealthCheckErrCode(t *testing.T, d time.Duration, cc *grpc.ClientConn, service string, wantCode codes.Code) { + t.Helper() + if _, err := healthCheck(d, cc, service); status.Code(err) != wantCode { + t.Fatalf("Health/Check() got errCode %v, want %v", status.Code(err), wantCode) + } +} + +// newHealthCheckStream is a helper function to start a health check streaming +// RPC, and returns the stream. +func newHealthCheckStream(t *testing.T, cc *grpc.ClientConn, service string) (healthgrpc.Health_WatchClient, context.CancelFunc) { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + hc := healthgrpc.NewHealthClient(cc) + stream, err := hc.Watch(ctx, &healthpb.HealthCheckRequest{Service: service}) + if err != nil { + t.Fatalf("hc.Watch(_, %v) failed: %v", service, err) + } + return stream, cancel +} + +// healthWatchChecker is a helper function to verify that the next health +// status returned on the given stream matches the one passed in 'wantStatus'. 
+func healthWatchChecker(t *testing.T, stream healthgrpc.Health_WatchClient, wantStatus healthpb.HealthCheckResponse_ServingStatus) { + t.Helper() + response, err := stream.Recv() + if err != nil { + t.Fatalf("stream.Recv() failed: %v", err) + } + if response.Status != wantStatus { + t.Fatalf("got servingStatus %v, want %v", response.Status, wantStatus) + } +} + +// TestHealthCheckSuccess invokes the unary Check() RPC on the health server in +// a successful case. +func (s) TestHealthCheckSuccess(t *testing.T) { + for _, e := range listTestEnv() { + testHealthCheckSuccess(t, e) + } +} + +func testHealthCheckSuccess(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + defer te.tearDown() + + verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.OK) +} + +// TestHealthCheckFailure invokes the unary Check() RPC on the health server +// with an expired context and expects the RPC to fail. +func (s) TestHealthCheckFailure(t *testing.T) { + for _, e := range listTestEnv() { + testHealthCheckFailure(t, e) + } +} + +func testHealthCheckFailure(t *testing.T, e env) { + te := newTest(t, e) + te.declareLogNoise( + "Failed to dial ", + "grpc: the client connection is closing; please retry", + ) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + defer te.tearDown() + + verifyHealthCheckErrCode(t, 0*time.Second, te.clientConn(), defaultHealthService, codes.DeadlineExceeded) + awaitNewConnLogOutput() +} + +// TestHealthCheckOff makes a unary Check() RPC on the health server where the +// health status of the defaultHealthService is not set, and therefore expects +// an error code 'codes.NotFound'. 
+func (s) TestHealthCheckOff(t *testing.T) { + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. + if e.name == "handler-tls" { + continue + } + testHealthCheckOff(t, e) + } +} + +func testHealthCheckOff(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), defaultHealthService, codes.NotFound) +} + +// TestHealthWatchMultipleClients makes a streaming Watch() RPC on the health +// server with multiple clients and expects the same status on both streams. +func (s) TestHealthWatchMultipleClients(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchMultipleClients(t, e) + } +} + +func testHealthWatchMultipleClients(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + stream1, cf1 := newHealthCheckStream(t, cc, defaultHealthService) + defer cf1() + healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) + + stream2, cf2 := newHealthCheckStream(t, cc, defaultHealthService) + defer cf2() + healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) + + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) + healthWatchChecker(t, stream1, healthpb.HealthCheckResponse_NOT_SERVING) + healthWatchChecker(t, stream2, healthpb.HealthCheckResponse_NOT_SERVING) +} + +// TestHealthWatchSameStatusmakes a streaming Watch() RPC on the health server +// and makes sure that the health status of the server is as expected after +// multiple calls to SetServingStatus with the same status. 
+func (s) TestHealthWatchSameStatus(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchSameStatus(t, e) + } +} + +func testHealthWatchSameStatus(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) + defer cf() + + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING) +} + +// TestHealthWatchServiceStatusSetBeforeStartingServer starts a health server +// on which the health status for the defaultService is set before the gRPC +// server is started, and expects the correct health status to be returned. 
+func (s) TestHealthWatchServiceStatusSetBeforeStartingServer(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchSetServiceStatusBeforeStartingServer(t, e) + } +} + +func testHealthWatchSetServiceStatusBeforeStartingServer(t *testing.T, e env) { + hs := health.NewServer() + te := newTest(t, e) + te.healthServer = hs + hs.SetServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) + defer cf() + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) +} + +// TestHealthWatchDefaultStatusChange verifies the simple case where the +// service starts off with a SERVICE_UNKNOWN status (because SetServingStatus +// hasn't been called yet) and then moves to SERVING after SetServingStatus is +// called. +func (s) TestHealthWatchDefaultStatusChange(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchDefaultStatusChange(t, e) + } +} + +func testHealthWatchDefaultStatusChange(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) + defer cf() + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVICE_UNKNOWN) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) +} + +// TestHealthWatchSetServiceStatusBeforeClientCallsWatch verifies the case +// where the health status is set to SERVING before the client calls Watch(). 
+func (s) TestHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchSetServiceStatusBeforeClientCallsWatch(t, e) + } +} + +func testHealthWatchSetServiceStatusBeforeClientCallsWatch(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + defer te.tearDown() + + stream, cf := newHealthCheckStream(t, te.clientConn(), defaultHealthService) + defer cf() + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) +} + +// TestHealthWatchOverallServerHealthChange verifies setting the overall status +// of the server by using the empty service name. +func (s) TestHealthWatchOverallServerHealthChange(t *testing.T) { + for _, e := range listTestEnv() { + testHealthWatchOverallServerHealthChange(t, e) + } +} + +func testHealthWatchOverallServerHealthChange(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + stream, cf := newHealthCheckStream(t, te.clientConn(), "") + defer cf() + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_SERVING) + te.setHealthServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING) + healthWatchChecker(t, stream, healthpb.HealthCheckResponse_NOT_SERVING) +} + +// TestUnknownHandler verifies that an expected error is returned (by setting +// the unknownHandler on the server) for a service which is not exposed to the +// client. +func (s) TestUnknownHandler(t *testing.T) { + // An example unknownHandler that returns a different code and a different + // method, making sure that we do not expose what methods are implemented to + // a client that is not authenticated. 
+ unknownHandler := func(srv interface{}, stream grpc.ServerStream) error { + return status.Error(codes.Unauthenticated, "user unauthenticated") + } + for _, e := range listTestEnv() { + // TODO(bradfitz): Temporarily skip this env due to #619. + if e.name == "handler-tls" { + continue + } + testUnknownHandler(t, e, unknownHandler) + } +} + +func testUnknownHandler(t *testing.T, e env, unknownHandler grpc.StreamHandler) { + te := newTest(t, e) + te.unknownHandler = unknownHandler + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + verifyHealthCheckErrCode(t, 1*time.Second, te.clientConn(), "", codes.Unauthenticated) +} + +// TestHealthCheckServingStatus makes a streaming Watch() RPC on the health +// server and verifies a bunch of health status transitions. +func (s) TestHealthCheckServingStatus(t *testing.T) { + for _, e := range listTestEnv() { + testHealthCheckServingStatus(t, e) + } +} + +func testHealthCheckServingStatus(t *testing.T, e env) { + te := newTest(t, e) + te.enableHealthServer = true + te.startServer(&testServer{security: e.security}) + defer te.tearDown() + + cc := te.clientConn() + verifyHealthCheckStatus(t, 1*time.Second, cc, "", healthpb.HealthCheckResponse_SERVING) + verifyHealthCheckErrCode(t, 1*time.Second, cc, defaultHealthService, codes.NotFound) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_SERVING) + verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_SERVING) + te.setHealthServingStatus(defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) + verifyHealthCheckStatus(t, 1*time.Second, cc, defaultHealthService, healthpb.HealthCheckResponse_NOT_SERVING) +} diff --git a/test/http_header_end2end_test.go b/test/http_header_end2end_test.go new file mode 100644 index 000000000000..77867133f95c --- /dev/null +++ b/test/http_header_end2end_test.go @@ -0,0 +1,261 @@ +/* +* +* Copyright 2022 gRPC authors. 
+* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +* + */ +package test + +import ( + "context" + "fmt" + "net" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" +) + +func (s) TestHTTPHeaderFrameErrorHandlingHTTPMode(t *testing.T) { + type test struct { + name string + header []string + errCode codes.Code + } + + var tests []test + + // Non-gRPC content-type fallback path. + for httpCode := range transport.HTTPStatusConvTab { + tests = append(tests, test{ + name: fmt.Sprintf("Non-gRPC content-type fallback path with httpCode: %v", httpCode), + header: []string{ + ":status", fmt.Sprintf("%d", httpCode), + "content-type", "text/html", // non-gRPC content type to switch to HTTP mode. + "grpc-status", "1", // Make up a gRPC status error + "grpc-status-details-bin", "???", // Make up a gRPC field parsing error + }, + errCode: transport.HTTPStatusConvTab[int(httpCode)], + }) + } + + // Missing content-type fallback path. + for httpCode := range transport.HTTPStatusConvTab { + tests = append(tests, test{ + name: fmt.Sprintf("Missing content-type fallback path with httpCode: %v", httpCode), + header: []string{ + ":status", fmt.Sprintf("%d", httpCode), + // Omitting content type to switch to HTTP mode. 
+ "grpc-status", "1", // Make up a gRPC status error + "grpc-status-details-bin", "???", // Make up a gRPC field parsing error + }, + errCode: transport.HTTPStatusConvTab[int(httpCode)], + }) + } + + // Malformed HTTP status when fallback. + tests = append(tests, test{ + name: "Malformed HTTP status when fallback", + header: []string{ + ":status", "abc", + // Omitting content type to switch to HTTP mode. + "grpc-status", "1", // Make up a gRPC status error + "grpc-status-details-bin", "???", // Make up a gRPC field parsing error + }, + errCode: codes.Internal, + }) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + serverAddr, cleanup, err := startServer(t, test.header) + if err != nil { + t.Fatal(err) + } + defer cleanup() + if err := doHTTPHeaderTest(serverAddr, test.errCode); err != nil { + t.Error(err) + } + }) + } +} + +// Testing erroneous ResponseHeader or Trailers-only (delivered in the first HEADERS frame). +func (s) TestHTTPHeaderFrameErrorHandlingInitialHeader(t *testing.T) { + for _, test := range []struct { + name string + header []string + errCode codes.Code + }{ + { + name: "missing gRPC status", + header: []string{ + ":status", "403", + "content-type", "application/grpc", + }, + errCode: codes.PermissionDenied, + }, + { + name: "malformed grpc-status", + header: []string{ + ":status", "502", + "content-type", "application/grpc", + "grpc-status", "abc", + }, + errCode: codes.Internal, + }, + { + name: "Malformed grpc-tags-bin field", + header: []string{ + ":status", "502", + "content-type", "application/grpc", + "grpc-status", "0", + "grpc-tags-bin", "???", + }, + errCode: codes.Unavailable, + }, + { + name: "gRPC status error", + header: []string{ + ":status", "502", + "content-type", "application/grpc", + "grpc-status", "3", + }, + errCode: codes.Unavailable, + }, + } { + t.Run(test.name, func(t *testing.T) { + serverAddr, cleanup, err := startServer(t, test.header) + if err != nil { + t.Fatal(err) + } + defer cleanup() + if 
err := doHTTPHeaderTest(serverAddr, test.errCode); err != nil { + t.Error(err) + } + }) + } +} + +// Testing non-Trailers-only Trailers (delivered in second HEADERS frame) +func (s) TestHTTPHeaderFrameErrorHandlingNormalTrailer(t *testing.T) { + tests := []struct { + name string + responseHeader []string + trailer []string + errCode codes.Code + }{ + { + name: "trailer missing grpc-status", + responseHeader: []string{ + ":status", "200", + "content-type", "application/grpc", + }, + trailer: []string{ + // trailer missing grpc-status + ":status", "502", + }, + errCode: codes.Unavailable, + }, + { + name: "malformed grpc-status-details-bin field with status 404", + responseHeader: []string{ + ":status", "404", + "content-type", "application/grpc", + }, + trailer: []string{ + // malformed grpc-status-details-bin field + "grpc-status", "0", + "grpc-status-details-bin", "????", + }, + errCode: codes.Unimplemented, + }, + { + name: "malformed grpc-status-details-bin field with status 200", + responseHeader: []string{ + ":status", "200", + "content-type", "application/grpc", + }, + trailer: []string{ + // malformed grpc-status-details-bin field + "grpc-status", "0", + "grpc-status-details-bin", "????", + }, + errCode: codes.Internal, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + serverAddr, cleanup, err := startServer(t, test.responseHeader, test.trailer) + if err != nil { + t.Fatal(err) + } + defer cleanup() + if err := doHTTPHeaderTest(serverAddr, test.errCode); err != nil { + t.Error(err) + } + }) + + } +} + +func (s) TestHTTPHeaderFrameErrorHandlingMoreThanTwoHeaders(t *testing.T) { + header := []string{ + ":status", "200", + "content-type", "application/grpc", + } + serverAddr, cleanup, err := startServer(t, header, header, header) + if err != nil { + t.Fatal(err) + } + defer cleanup() + if err := doHTTPHeaderTest(serverAddr, codes.Internal); err != nil { + t.Fatal(err) + } +} + +func startServer(t *testing.T, headerFields 
...[]string) (serverAddr string, cleanup func(), err error) { + t.Helper() + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + return "", nil, fmt.Errorf("listening on %q: %v", "localhost:0", err) + } + server := &httpServer{responses: []httpServerResponse{{trailers: headerFields}}} + server.start(t, lis) + return lis.Addr().String(), func() { lis.Close() }, nil +} + +func doHTTPHeaderTest(lisAddr string, errCode codes.Code) error { + cc, err := grpc.Dial(lisAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return fmt.Errorf("dial(%q): %v", lisAddr, err) + } + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + stream, err := client.FullDuplexCall(ctx) + if err != nil { + return fmt.Errorf("creating FullDuplex stream: %v", err) + } + if _, err := stream.Recv(); err == nil || status.Code(err) != errCode { + return fmt.Errorf("stream.Recv() = %v, want error code: %v", err, errCode) + } + return nil +} diff --git a/test/idleness_test.go b/test/idleness_test.go new file mode 100644 index 000000000000..b7e7ea6cd7cf --- /dev/null +++ b/test/idleness_test.go @@ -0,0 +1,459 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +const defaultTestShortIdleTimeout = 500 * time.Millisecond + +// channelzTraceEventFound looks up the top-channels in channelz (expects a +// single one), and checks if there is a trace event on the channel matching the +// provided description string. +func channelzTraceEventFound(ctx context.Context, wantDesc string) error { + for ctx.Err() == nil { + tcs, _ := channelz.GetTopChannels(0, 0) + if l := len(tcs); l != 1 { + return fmt.Errorf("when looking for channelz trace event with description %q, found %d top-level channels, want 1", wantDesc, l) + } + if tcs[0].Trace == nil { + return fmt.Errorf("when looking for channelz trace event with description %q, no trace events found for top-level channel", wantDesc) + } + + for _, e := range tcs[0].Trace.Events { + if strings.Contains(e.Desc, wantDesc) { + return nil + } + } + } + return fmt.Errorf("when looking for channelz trace event with description %q, %w", wantDesc, ctx.Err()) +} + +// channelzTraceEventNotFound looks up the top-channels in channelz (expects a +// single one), and verifies that there is no trace event on the channel +// matching the provided description string. 
+func channelzTraceEventNotFound(ctx context.Context, wantDesc string) error { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + + err := channelzTraceEventFound(sCtx, wantDesc) + if err == nil { + return fmt.Errorf("found channelz trace event with description %q, when expected not to", wantDesc) + } + if !errors.Is(err, context.DeadlineExceeded) { + return err + } + return nil +} + +// Tests the case where channel idleness is disabled by passing an idle_timeout +// of 0. Verifies that a READY channel with no RPCs does not move to IDLE. +func (s) TestChannelIdleness_Disabled_NoActivity(t *testing.T) { + // Setup channelz for testing. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + // Create a ClientConn with idle_timeout set to 0. + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(0), // Disable idleness. + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Start a test backend and push an address update via the resolver. + backend := stubserver.StartTestService(t, nil) + t.Cleanup(backend.Stop) + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Verify that the ClientConn moves to READY. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) + + // Verify that the ClientConn stay in READY. + sCtx, sCancel := context.WithTimeout(ctx, 3*defaultTestShortIdleTimeout) + defer sCancel() + awaitNoStateChange(sCtx, t, cc, connectivity.Ready) + + // Verify that there are no idleness related channelz events. 
+ if err := channelzTraceEventNotFound(ctx, "entering idle mode"); err != nil { + t.Fatal(err) + } + if err := channelzTraceEventNotFound(ctx, "exiting idle mode"); err != nil { + t.Fatal(err) + } +} + +// Tests the case where channel idleness is enabled by passing a small value for +// idle_timeout. Verifies that a READY channel with no RPCs moves to IDLE. +func (s) TestChannelIdleness_Enabled_NoActivity(t *testing.T) { + // Setup channelz for testing. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + // Create a ClientConn with a short idle_timeout. + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(defaultTestShortIdleTimeout), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Start a test backend and push an address update via the resolver. + backend := stubserver.StartTestService(t, nil) + t.Cleanup(backend.Stop) + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Verify that the ClientConn moves to READY. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) + + // Verify that the ClientConn moves to IDLE as there is no activity. + awaitState(ctx, t, cc, connectivity.Idle) + + // Verify idleness related channelz events. + if err := channelzTraceEventFound(ctx, "entering idle mode"); err != nil { + t.Fatal(err) + } +} + +// Tests the case where channel idleness is enabled by passing a small value for +// idle_timeout. Verifies that a READY channel with an ongoing RPC stays READY. 
+func (s) TestChannelIdleness_Enabled_OngoingCall(t *testing.T) { + // Setup channelz for testing. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + // Create a ClientConn with a short idle_timeout. + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(defaultTestShortIdleTimeout), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Start a test backend which keeps a unary RPC call active by blocking on a + // channel that is closed by the test later on. Also push an address update + // via the resolver. + blockCh := make(chan struct{}) + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + <-blockCh + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Cleanup(backend.Stop) + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Verify that the ClientConn moves to READY. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) + + // Spawn a goroutine which checks expected state transitions and idleness + // channelz trace events. It eventually closes `blockCh`, thereby unblocking + // the server RPC handler and the unary call below. + errCh := make(chan error, 1) + go func() { + // Verify that the ClientConn stay in READY. 
+ sCtx, sCancel := context.WithTimeout(ctx, 3*defaultTestShortIdleTimeout) + defer sCancel() + awaitNoStateChange(sCtx, t, cc, connectivity.Ready) + + // Verify that there are no idleness related channelz events. + if err := channelzTraceEventNotFound(ctx, "entering idle mode"); err != nil { + errCh <- err + return + } + if err := channelzTraceEventNotFound(ctx, "exiting idle mode"); err != nil { + errCh <- err + return + } + + // Unblock the unary RPC on the server. + close(blockCh) + errCh <- nil + }() + + // Make a unary RPC that blocks on the server, thereby ensuring that the + // count of active RPCs on the client is non-zero. + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Errorf("EmptyCall RPC failed: %v", err) + } + + select { + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + case <-ctx.Done(): + t.Fatalf("Timeout when trying to verify that an active RPC keeps channel from moving to IDLE") + } +} + +// Tests the case where channel idleness is enabled by passing a small value for +// idle_timeout. Verifies that activity on a READY channel (frequent and short +// RPCs) keeps it from moving to IDLE. +func (s) TestChannelIdleness_Enabled_ActiveSinceLastCheck(t *testing.T) { + // Setup channelz for testing. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + // Create a ClientConn with a short idle_timeout. + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(defaultTestShortIdleTimeout), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) 
+ if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Start a test backend and push an address update via the resolver. + backend := stubserver.StartTestService(t, nil) + t.Cleanup(backend.Stop) + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Verify that the ClientConn moves to READY. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) + + // For a duration of three times the configured idle timeout, making RPCs + // every now and then and ensure that the channel does not move out of + // READY. + sCtx, sCancel := context.WithTimeout(ctx, 3*defaultTestShortIdleTimeout) + defer sCancel() + go func() { + for ; sCtx.Err() == nil; <-time.After(defaultTestShortIdleTimeout / 4) { + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); err != nil { + // While iterating through this for loop, at some point in time, + // the context deadline will expire. It is safe to ignore that + // error code. + if status.Code(err) != codes.DeadlineExceeded { + t.Errorf("EmptyCall RPC failed: %v", err) + return + } + } + } + }() + + // Verify that the ClientConn stay in READY. + awaitNoStateChange(sCtx, t, cc, connectivity.Ready) + + // Verify that there are no idleness related channelz events. + if err := channelzTraceEventNotFound(ctx, "entering idle mode"); err != nil { + t.Fatal(err) + } + if err := channelzTraceEventNotFound(ctx, "exiting idle mode"); err != nil { + t.Fatal(err) + } +} + +// Tests the case where channel idleness is enabled by passing a small value for +// idle_timeout. Verifies that a READY channel with no RPCs moves to IDLE. Also +// verifies that a subsequent RPC on the IDLE channel kicks it out of IDLE. +func (s) TestChannelIdleness_Enabled_ExitIdleOnRPC(t *testing.T) { + // Setup channelz for testing. 
+ czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + // Start a test backend and set the bootstrap state of the resolver to + // include this address. This will ensure that when the resolver is + // restarted when exiting idle, it will push the same address to grpc again. + r := manual.NewBuilderWithScheme("whatever") + backend := stubserver.StartTestService(t, nil) + t.Cleanup(backend.Stop) + r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Create a ClientConn with a short idle_timeout. + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(defaultTestShortIdleTimeout), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Verify that the ClientConn moves to READY. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) + + // Verify that the ClientConn moves to IDLE as there is no activity. + awaitState(ctx, t, cc, connectivity.Idle) + + // Verify idleness related channelz events. + if err := channelzTraceEventFound(ctx, "entering idle mode"); err != nil { + t.Fatal(err) + } + + // Make an RPC and ensure that it succeeds and moves the channel back to + // READY. + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall RPC failed: %v", err) + } + awaitState(ctx, t, cc, connectivity.Ready) + if err := channelzTraceEventFound(ctx, "exiting idle mode"); err != nil { + t.Fatal(err) + } +} + +// Tests the case where channel idleness is enabled by passing a small value for +// idle_timeout. 
Simulates a race between the idle timer firing and RPCs being +// initiated, after a period of inactivity on the channel. +// +// After a period of inactivity (for the configured idle timeout duration), when +// RPCs are started, there are two possibilities: +// - the idle timer wins the race and puts the channel in idle. The RPCs then +// kick it out of idle. +// - the RPCs win the race, and therefore the channel never moves to idle. +// +// In either of these cases, all RPCs must succeed. +func (s) TestChannelIdleness_Enabled_IdleTimeoutRacesWithRPCs(t *testing.T) { + // Setup channelz for testing. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + // Start a test backend and set the bootstrap state of the resolver to + // include this address. This will ensure that when the resolver is + // restarted when exiting idle, it will push the same address to grpc again. + r := manual.NewBuilderWithScheme("whatever") + backend := stubserver.StartTestService(t, nil) + t.Cleanup(backend.Stop) + r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Create a ClientConn with a short idle_timeout. + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(defaultTestShortTimeout), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Verify that the ClientConn moves to READY. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + awaitState(ctx, t, cc, connectivity.Ready) + + // Make an RPC every defaultTestShortTimeout duration so as to race with the + // idle timeout. Whether the idle timeout wins the race or the RPC wins the + // race, RPCs must succeed. 
+ client := testgrpc.NewTestServiceClient(cc) + for i := 0; i < 20; i++ { + <-time.After(defaultTestShortTimeout) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Errorf("EmptyCall RPC failed: %v", err) + } + } +} + +// Tests the case where the channel is IDLE and we call cc.Connect. +func (s) TestChannelIdleness_Connect(t *testing.T) { + // Start a test backend and set the bootstrap state of the resolver to + // include this address. This will ensure that when the resolver is + // restarted when exiting idle, it will push the same address to grpc again. + r := manual.NewBuilderWithScheme("whatever") + backend := stubserver.StartTestService(t, nil) + t.Cleanup(backend.Stop) + r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: backend.Address}}}) + + // Create a ClientConn with a short idle_timeout. + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithIdleTimeout(defaultTestShortIdleTimeout), + grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // Verify that the ClientConn moves to IDLE. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + awaitState(ctx, t, cc, connectivity.Idle) + + // Connect should exit channel idleness. + cc.Connect() + + // Verify that the ClientConn moves back to READY. 
+ awaitState(ctx, t, cc, connectivity.Ready) +} diff --git a/test/insecure_creds_test.go b/test/insecure_creds_test.go index 19f8bb8b791b..0647c81232ae 100644 --- a/test/insecure_creds_test.go +++ b/test/insecure_creds_test.go @@ -23,7 +23,6 @@ import ( "net" "strings" "testing" - "time" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -33,11 +32,10 @@ import ( "google.golang.org/grpc/peer" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) -const defaultTestTimeout = 5 * time.Second - // testLegacyPerRPCCredentials is a PerRPCCredentials that has yet incorporated security level. type testLegacyPerRPCCredentials struct{} @@ -114,7 +112,7 @@ func (s) TestInsecureCreds(t *testing.T) { s := grpc.NewServer(sOpts...) defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -124,21 +122,19 @@ func (s) TestInsecureCreds(t *testing.T) { go s.Serve(lis) addr := lis.Addr().String() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - cOpts := []grpc.DialOption{grpc.WithBlock()} + opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} if test.clientInsecureCreds { - cOpts = append(cOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } else { - cOpts = append(cOpts, grpc.WithInsecure()) + opts = []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} } - cc, err := grpc.DialContext(ctx, addr, cOpts...) + cc, err := grpc.Dial(addr, opts...) 
if err != nil { t.Fatalf("grpc.Dial(%q) failed: %v", addr, err) } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() if _, err = c.EmptyCall(ctx, &testpb.Empty{}); err != nil { t.Fatalf("EmptyCall(_, _) = _, %v; want _, ", err) } @@ -146,73 +142,66 @@ func (s) TestInsecureCreds(t *testing.T) { } } -func (s) TestInsecureCredsWithPerRPCCredentials(t *testing.T) { - tests := []struct { - desc string - perRPCCredsViaDialOptions bool - perRPCCredsViaCallOptions bool - wantErr string - }{ - { - desc: "send PerRPCCredentials via DialOptions", - perRPCCredsViaDialOptions: true, - perRPCCredsViaCallOptions: false, - wantErr: "context deadline exceeded", - }, - { - desc: "send PerRPCCredentials via CallOptions", - perRPCCredsViaDialOptions: false, - perRPCCredsViaCallOptions: true, - wantErr: "transport: cannot send secure credentials on an insecure connection", +func (s) TestInsecureCreds_WithPerRPCCredentials_AsCallOption(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil }, } - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - ss := &stubserver.StubServer{ - EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil - }, - } - sOpts := []grpc.ServerOption{} - sOpts = append(sOpts, grpc.Creds(insecure.NewCredentials())) - s := grpc.NewServer(sOpts...) 
- defer s.Stop() + s := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, ss) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("net.Listen(tcp, localhost:0) failed: %v", err) + } + go s.Serve(lis) - testpb.RegisterTestServiceServer(s, ss) + addr := lis.Addr().String() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("net.Listen(tcp, localhost:0) failed: %v", err) - } + dopts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} + copts := []grpc.CallOption{grpc.PerRPCCredentials(testLegacyPerRPCCredentials{})} + cc, err := grpc.Dial(addr, dopts...) + if err != nil { + t.Fatalf("grpc.Dial(%q) failed: %v", addr, err) + } + defer cc.Close() - go s.Serve(lis) + const wantErr = "transport: cannot send secure credentials on an insecure connection" + c := testgrpc.NewTestServiceClient(cc) + if _, err = c.EmptyCall(ctx, &testpb.Empty{}, copts...); err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("insecure credentials with per-RPC credentials requiring transport security returned error: %v; want %s", err, wantErr) + } +} - addr := lis.Addr().String() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - cOpts := []grpc.DialOption{grpc.WithBlock()} - cOpts = append(cOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) - - if test.perRPCCredsViaDialOptions { - cOpts = append(cOpts, grpc.WithPerRPCCredentials(testLegacyPerRPCCredentials{})) - if _, err := grpc.DialContext(ctx, addr, cOpts...); !strings.Contains(err.Error(), test.wantErr) { - t.Fatalf("InsecureCredsWithPerRPCCredentials/send_PerRPCCredentials_via_DialOptions = %v; want %s", err, test.wantErr) - } - } +func (s) TestInsecureCreds_WithPerRPCCredentials_AsDialOption(t *testing.T) { + ss := 
&stubserver.StubServer{ + EmptyCallF: func(_ context.Context, _ *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } - if test.perRPCCredsViaCallOptions { - cc, err := grpc.DialContext(ctx, addr, cOpts...) - if err != nil { - t.Fatalf("grpc.Dial(%q) failed: %v", addr, err) - } - defer cc.Close() - - c := testpb.NewTestServiceClient(cc) - if _, err = c.EmptyCall(ctx, &testpb.Empty{}, grpc.PerRPCCredentials(testLegacyPerRPCCredentials{})); !strings.Contains(err.Error(), test.wantErr) { - t.Fatalf("InsecureCredsWithPerRPCCredentials/send_PerRPCCredentials_via_CallOptions = %v; want %s", err, test.wantErr) - } - } - }) + s := grpc.NewServer(grpc.Creds(insecure.NewCredentials())) + defer s.Stop() + testgrpc.RegisterTestServiceServer(s, ss) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("net.Listen(tcp, localhost:0) failed: %v", err) + } + go s.Serve(lis) + + addr := lis.Addr().String() + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithPerRPCCredentials(testLegacyPerRPCCredentials{}), + } + const wantErr = "the credentials require transport level security" + if _, err := grpc.Dial(addr, dopts...); err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("grpc.Dial(%q) returned err %v, want: %v", addr, err, wantErr) } } diff --git a/test/interceptor_test.go b/test/interceptor_test.go new file mode 100644 index 000000000000..c2004bb85465 --- /dev/null +++ b/test/interceptor_test.go @@ -0,0 +1,281 @@ +/* + * + * Copyright 2022 gRPC authors. + + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +type parentCtxkey struct{} +type firstInterceptorCtxkey struct{} +type secondInterceptorCtxkey struct{} +type baseInterceptorCtxKey struct{} + +const ( + parentCtxVal = "parent" + firstInterceptorCtxVal = "firstInterceptor" + secondInterceptorCtxVal = "secondInterceptor" + baseInterceptorCtxVal = "baseInterceptor" +) + +// TestUnaryClientInterceptor_ContextValuePropagation verifies that a unary +// interceptor receives context values specified in the context passed to the +// RPC call. +func (s) TestUnaryClientInterceptor_ContextValuePropagation(t *testing.T) { + errCh := testutils.NewChannel() + unaryInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.Send(fmt.Errorf("unaryInt got %q in context.Val, want %q", got, parentCtxVal)) + } + errCh.Send(nil) + return invoker(ctx, method, req, reply, cc, opts...) + } + + // Start a stub server and use the above unary interceptor while creating a + // ClientConn to it. 
+ ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := ss.Start(nil, grpc.WithUnaryInterceptor(unaryInt)); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.EmptyCall(context.WithValue(ctx, parentCtxkey{}, parentCtxVal), &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall() failed: %v", err) + } + val, err := errCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for unary interceptor to be invoked: %v", err) + } + if val != nil { + t.Fatalf("unary interceptor failed: %v", val) + } +} + +// TestChainUnaryClientInterceptor_ContextValuePropagation verifies that a chain +// of unary interceptors receive context values specified in the original call +// as well as the ones specified by prior interceptors in the chain. +func (s) TestChainUnaryClientInterceptor_ContextValuePropagation(t *testing.T) { + errCh := testutils.NewChannel() + firstInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("first interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if ctx.Value(firstInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("first interceptor should not have %T in context", firstInterceptorCtxkey{})) + } + if ctx.Value(secondInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("first interceptor should not have %T in context", secondInterceptorCtxkey{})) + } + firstCtx := context.WithValue(ctx, firstInterceptorCtxkey{}, firstInterceptorCtxVal) + return invoker(firstCtx, method, req, reply, cc, opts...) 
+ } + + secondInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("second interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if got, ok := ctx.Value(firstInterceptorCtxkey{}).(string); !ok || got != firstInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("second interceptor got %q in context.Val, want %q", got, firstInterceptorCtxVal)) + } + if ctx.Value(secondInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("second interceptor should not have %T in context", secondInterceptorCtxkey{})) + } + secondCtx := context.WithValue(ctx, secondInterceptorCtxkey{}, secondInterceptorCtxVal) + return invoker(secondCtx, method, req, reply, cc, opts...) + } + + lastInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if got, ok := ctx.Value(firstInterceptorCtxkey{}).(string); !ok || got != firstInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, firstInterceptorCtxVal)) + } + if got, ok := ctx.Value(secondInterceptorCtxkey{}).(string); !ok || got != secondInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, secondInterceptorCtxVal)) + } + errCh.SendContext(ctx, nil) + return invoker(ctx, method, req, reply, cc, opts...) + } + + // Start a stub server and use the above chain of interceptors while creating + // a ClientConn to it. 
+ ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := ss.Start(nil, grpc.WithChainUnaryInterceptor(firstInt, secondInt, lastInt)); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.EmptyCall(context.WithValue(ctx, parentCtxkey{}, parentCtxVal), &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall() failed: %v", err) + } + val, err := errCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for unary interceptor to be invoked: %v", err) + } + if val != nil { + t.Fatalf("unary interceptor failed: %v", val) + } +} + +// TestChainOnBaseUnaryClientInterceptor_ContextValuePropagation verifies that +// unary interceptors specified as a base interceptor or as a chain interceptor +// receive context values specified in the original call as well as the ones +// specified by interceptors in the chain. +func (s) TestChainOnBaseUnaryClientInterceptor_ContextValuePropagation(t *testing.T) { + errCh := testutils.NewChannel() + baseInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("base interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if ctx.Value(baseInterceptorCtxKey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("baseinterceptor should not have %T in context", baseInterceptorCtxKey{})) + } + baseCtx := context.WithValue(ctx, baseInterceptorCtxKey{}, baseInterceptorCtxVal) + return invoker(baseCtx, method, req, reply, cc, opts...) 
+ } + + chainInt := func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("chain interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if got, ok := ctx.Value(baseInterceptorCtxKey{}).(string); !ok || got != baseInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("chain interceptor got %q in context.Val, want %q", got, baseInterceptorCtxVal)) + } + errCh.SendContext(ctx, nil) + return invoker(ctx, method, req, reply, cc, opts...) + } + + // Start a stub server and use the above chain of interceptors while creating + // a ClientConn to it. + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := ss.Start(nil, grpc.WithUnaryInterceptor(baseInt), grpc.WithChainUnaryInterceptor(chainInt)); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.EmptyCall(context.WithValue(ctx, parentCtxkey{}, parentCtxVal), &testpb.Empty{}); err != nil { + t.Fatalf("ss.Client.EmptyCall() failed: %v", err) + } + val, err := errCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for unary interceptor to be invoked: %v", err) + } + if val != nil { + t.Fatalf("unary interceptor failed: %v", val) + } +} + +// TestChainStreamClientInterceptor_ContextValuePropagation verifies that a +// chain of stream interceptors receive context values specified in the original +// call as well as the ones specified by the prior interceptors in the chain. 
+func (s) TestChainStreamClientInterceptor_ContextValuePropagation(t *testing.T) { + errCh := testutils.NewChannel() + firstInt := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("first interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if ctx.Value(firstInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("first interceptor should not have %T in context", firstInterceptorCtxkey{})) + } + if ctx.Value(secondInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("first interceptor should not have %T in context", secondInterceptorCtxkey{})) + } + firstCtx := context.WithValue(ctx, firstInterceptorCtxkey{}, firstInterceptorCtxVal) + return streamer(firstCtx, desc, cc, method, opts...) + } + + secondInt := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("second interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if got, ok := ctx.Value(firstInterceptorCtxkey{}).(string); !ok || got != firstInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("second interceptor got %q in context.Val, want %q", got, firstInterceptorCtxVal)) + } + if ctx.Value(secondInterceptorCtxkey{}) != nil { + errCh.SendContext(ctx, fmt.Errorf("second interceptor should not have %T in context", secondInterceptorCtxkey{})) + } + secondCtx := context.WithValue(ctx, secondInterceptorCtxkey{}, secondInterceptorCtxVal) + return streamer(secondCtx, desc, cc, method, opts...) 
+ } + + lastInt := func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + if got, ok := ctx.Value(parentCtxkey{}).(string); !ok || got != parentCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, parentCtxVal)) + } + if got, ok := ctx.Value(firstInterceptorCtxkey{}).(string); !ok || got != firstInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, firstInterceptorCtxVal)) + } + if got, ok := ctx.Value(secondInterceptorCtxkey{}).(string); !ok || got != secondInterceptorCtxVal { + errCh.SendContext(ctx, fmt.Errorf("last interceptor got %q in context.Val, want %q", got, secondInterceptorCtxVal)) + } + errCh.SendContext(ctx, nil) + return streamer(ctx, desc, cc, method, opts...) + } + + // Start a stub server and use the above chain of interceptors while creating + // a ClientConn to it. 
+ ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return err + } + return stream.Send(&testpb.StreamingOutputCallResponse{}) + }, + } + if err := ss.Start(nil, grpc.WithChainStreamInterceptor(firstInt, secondInt, lastInt)); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := ss.Client.FullDuplexCall(context.WithValue(ctx, parentCtxkey{}, parentCtxVal)); err != nil { + t.Fatalf("ss.Client.FullDuplexCall() failed: %v", err) + } + val, err := errCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for stream interceptor to be invoked: %v", err) + } + if val != nil { + t.Fatalf("stream interceptor failed: %v", val) + } +} diff --git a/test/invoke_test.go b/test/invoke_test.go new file mode 100644 index 000000000000..e829df0a0603 --- /dev/null +++ b/test/invoke_test.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2022 gRPC authors. + + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "strings" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/stubserver" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/status" +) + +// TestInvoke verifies a straightforward invocation of ClientConn.Invoke(). +func (s) TestInvoke(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}); err != nil { + t.Fatalf("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") failed: %v", err) + } +} + +// TestInvokeLargeErr verifies an invocation of ClientConn.Invoke() where the +// server returns a really large error message. 
+func (s) TestInvokeLargeErr(t *testing.T) { + largeErrorStr := strings.Repeat("A", 1024*1024) + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, status.Error(codes.Internal, largeErrorStr) + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + err := ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}) + if err == nil { + t.Fatal("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") succeeded when expected to fail") + } + st, ok := status.FromError(err) + if !ok { + t.Fatal("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") received non-status error") + } + if status.Code(err) != codes.Internal || st.Message() != largeErrorStr { + t.Fatalf("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") failed with error: %v, want an error of code %d and desc size %d", err, codes.Internal, len(largeErrorStr)) + } +} + +// TestInvokeErrorSpecialChars tests an invocation of ClientConn.Invoke() and +// verifies that error messages don't get mangled. 
+func (s) TestInvokeErrorSpecialChars(t *testing.T) { + const weirdError = "format verbs: %v%s" + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, status.Error(codes.Internal, weirdError) + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + err := ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}) + if err == nil { + t.Fatal("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") succeeded when expected to fail") + } + st, ok := status.FromError(err) + if !ok { + t.Fatal("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") received non-status error") + } + if status.Code(err) != codes.Internal || st.Message() != weirdError { + t.Fatalf("grpc.Invoke(\"/grpc.testing.TestService/EmptyCall\") failed with error: %v, want %v", err, weirdError) + } +} + +// TestInvokeCancel tests an invocation of ClientConn.Invoke() with a cancelled +// context and verifies that the request is not actually sent to the server. 
+func (s) TestInvokeCancel(t *testing.T) { + cancelled := 0 + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + cancelled++ + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + for i := 0; i < 100; i++ { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}) + } + if cancelled != 0 { + t.Fatalf("server received %d of 100 cancelled requests", cancelled) + } +} + +// TestInvokeCancelClosedNonFail tests an invocation of ClientConn.Invoke() with +// a cancelled non-failfast RPC on a closed ClientConn and verifies that the +// call terminates with an error. +func (s) TestInvokeCancelClosedNonFailFast(t *testing.T) { + ss := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Failed to start stub server: %v", err) + } + defer ss.Stop() + + ss.CC.Close() + ctx, cancel := context.WithCancel(context.Background()) + cancel() + if err := ss.CC.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", &testpb.Empty{}, &testpb.Empty{}, grpc.WaitForReady(true)); err == nil { + t.Fatal("ClientConn.Invoke() on closed connection succeeded when expected to fail") + } +} diff --git a/test/kokoro/psm-security.cfg b/test/kokoro/psm-security.cfg new file mode 100644 index 000000000000..040efe9d707e --- /dev/null +++ b/test/kokoro/psm-security.cfg @@ -0,0 +1,13 @@ +# Config file for internal CI + +# Location of the continuous shell script in repository. 
+build_file: "grpc-go/test/kokoro/psm-security.sh" +timeout_mins: 240 + +action { + define_artifacts { + regex: "artifacts/**/*sponge_log.xml" + regex: "artifacts/**/*.log" + strip_prefix: "artifacts" + } +} diff --git a/test/kokoro/psm-security.sh b/test/kokoro/psm-security.sh new file mode 100755 index 000000000000..46e3709d2e82 --- /dev/null +++ b/test/kokoro/psm-security.sh @@ -0,0 +1,166 @@ +#!/usr/bin/env bash +# Copyright 2021 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +# Constants +readonly GITHUB_REPOSITORY_NAME="grpc-go" +readonly TEST_DRIVER_INSTALL_SCRIPT_URL="https://raw.githubusercontent.com/${TEST_DRIVER_REPO_OWNER:-grpc}/grpc/${TEST_DRIVER_BRANCH:-master}/tools/internal_ci/linux/grpc_xds_k8s_install_test_driver.sh" +## xDS test server/client Docker images +readonly SERVER_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-server" +readonly CLIENT_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-client" +readonly FORCE_IMAGE_BUILD="${FORCE_IMAGE_BUILD:-0}" + +####################################### +# Builds test app Docker images and pushes them to GCR +# Globals: +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# TESTING_VERSION: version branch under test, f.e. 
v1.42.x, master +# Arguments: +# None +# Outputs: +# Writes the output of `gcloud builds submit` to stdout, stderr +####################################### +build_test_app_docker_images() { + echo "Building Go xDS interop test app Docker images" + docker build -f "${SRC_DIR}/interop/xds/client/Dockerfile" -t "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" + docker build -f "${SRC_DIR}/interop/xds/server/Dockerfile" -t "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" + gcloud -q auth configure-docker + docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" + docker push "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" + if is_version_branch "${TESTING_VERSION}"; then + tag_and_push_docker_image "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" "${TESTING_VERSION}" + tag_and_push_docker_image "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" "${TESTING_VERSION}" + fi +} + +####################################### +# Builds test app and its docker images unless they already exist +# Globals: +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# FORCE_IMAGE_BUILD +# Arguments: +# None +# Outputs: +# Writes the output to stdout, stderr +####################################### +build_docker_images_if_needed() { + # Check if images already exist + server_tags="$(gcloud_gcr_list_image_tags "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}")" + printf "Server image: %s:%s\n" "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" + echo "${server_tags:-Server image not found}" + + client_tags="$(gcloud_gcr_list_image_tags "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}")" + printf "Client image: %s:%s\n" "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" + echo "${client_tags:-Client image not found}" + + # Build if any of the images are missing, or FORCE_IMAGE_BUILD=1 + if [[ "${FORCE_IMAGE_BUILD}" == "1" || -z "${server_tags}" || -z "${client_tags}" ]]; then + build_test_app_docker_images + else + echo "Skipping Go test app build" + fi +} + 
+####################################### +# Executes the test case +# Globals: +# TEST_DRIVER_FLAGFILE: Relative path to test driver flagfile +# KUBE_CONTEXT: The name of kubectl context with GKE cluster access +# TEST_XML_OUTPUT_DIR: Output directory for the test xUnit XML report +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# TESTING_VERSION: version branch under test: used by the framework to determine the supported PSM +# features. +# Arguments: +# Test case name +# Outputs: +# Writes the output of test execution to stdout, stderr +# Test xUnit report to ${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml +####################################### +run_test() { + # Test driver usage: + # https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage + local test_name="${1:?Usage: run_test test_name}" + set -x + local out_dir="${TEST_XML_OUTPUT_DIR}/${test_name}" + mkdir -pv "${out_dir}" + python -m "tests.${test_name}" \ + --flagfile="${TEST_DRIVER_FLAGFILE}" \ + --kube_context="${KUBE_CONTEXT}" \ + --server_image="${SERVER_IMAGE_NAME}:${GIT_COMMIT}" \ + --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ + --testing_version="${TESTING_VERSION}" \ + --nocheck_local_certs \ + --force_cleanup \ + --collect_app_logs \ + --log_dir="${out_dir}" \ + --xml_output_file="${out_dir}/sponge_log.xml" \ + |& tee "${out_dir}/sponge_log.log" +} + +####################################### +# Main function: provision software necessary to execute tests, and run them +# Globals: +# KOKORO_ARTIFACTS_DIR +# GITHUB_REPOSITORY_NAME +# SRC_DIR: Populated with absolute path to the source repo +# TEST_DRIVER_REPO_DIR: Populated with the path to the repo containing +# the test driver +# TEST_DRIVER_FULL_DIR: Populated with the path to the test driver source code +# TEST_DRIVER_FLAGFILE: Populated with relative path to test driver flagfile +# 
TEST_XML_OUTPUT_DIR: Populated with the path to test xUnit XML report +# GIT_ORIGIN_URL: Populated with the origin URL of git repo used for the build +# GIT_COMMIT: Populated with the SHA-1 of git commit being built +# GIT_COMMIT_SHORT: Populated with the short SHA-1 of git commit being built +# KUBE_CONTEXT: Populated with name of kubectl context with GKE cluster access +# Arguments: +# None +# Outputs: +# Writes the output of test execution to stdout, stderr +####################################### +main() { + local script_dir + script_dir="$(dirname "$0")" + + # Source the test driver from the master branch. + echo "Sourcing test driver install script from: ${TEST_DRIVER_INSTALL_SCRIPT_URL}" + source /dev/stdin <<< "$(curl -s "${TEST_DRIVER_INSTALL_SCRIPT_URL}")" + + activate_gke_cluster GKE_CLUSTER_PSM_SECURITY + + set -x + if [[ -n "${KOKORO_ARTIFACTS_DIR}" ]]; then + kokoro_setup_test_driver "${GITHUB_REPOSITORY_NAME}" + else + local_setup_test_driver "${script_dir}" + fi + build_docker_images_if_needed + # Run tests + cd "${TEST_DRIVER_FULL_DIR}" + local failed_tests=0 + test_suites=("baseline_test" "security_test" "authz_test") + for test in "${test_suites[@]}"; do + run_test $test || (( ++failed_tests )) + done + echo "Failed test suites: ${failed_tests}" +} + +main "$@" diff --git a/test/kokoro/xds.cfg b/test/kokoro/xds.cfg index d1a078217b84..a1e4ed0bb5e6 100644 --- a/test/kokoro/xds.cfg +++ b/test/kokoro/xds.cfg @@ -2,7 +2,7 @@ # Location of the continuous shell script in repository. 
build_file: "grpc-go/test/kokoro/xds.sh" -timeout_mins: 120 +timeout_mins: 360 action { define_artifacts { regex: "**/*sponge_log.*" diff --git a/test/kokoro/xds.sh b/test/kokoro/xds.sh index e75743dd9e57..ca676f9d58ed 100755 --- a/test/kokoro/xds.sh +++ b/test/kokoro/xds.sh @@ -7,16 +7,16 @@ cd github export GOPATH="${HOME}/gopath" pushd grpc-go/interop/xds/client -branch=$(git branch --all --no-color --contains "${KOKORO_GITHUB_COMMIT}" \ - | grep -v HEAD | head -1) -shopt -s extglob -branch="${branch//[[:space:]]}" -branch="${branch##remotes/origin/}" -shopt -u extglob -go build +# Install a version of Go supported by gRPC for the new features, e.g. +# errors.Is() +curl --retry 3 -O -L https://go.dev/dl/go1.17.3.linux-amd64.tar.gz +sudo tar -C /usr/local -xf go1.17.3.linux-amd64.tar.gz +sudo ln -s /usr/local/go/bin/go /usr/bin/go +# Retry go build on errors (e.g. go get connection errors), for at most 3 times +for i in 1 2 3; do go build && break || sleep 5; done popd -git clone -b "${branch}" --single-branch --depth=1 https://github.com/grpc/grpc.git +git clone -b master --single-branch --depth=1 https://github.com/grpc/grpc.git grpc/tools/run_tests/helper_scripts/prep_xds.sh @@ -27,10 +27,10 @@ grpc/tools/run_tests/helper_scripts/prep_xds.sh # they are added into "all". 
GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info \ python3 grpc/tools/run_tests/run_xds_tests.py \ - --test_case="all,path_matching,header_matching,circuit_breaking,timeout,fault_injection" \ + --test_case="ping_pong,circuit_breaking" \ --project_id=grpc-testing \ --project_num=830293263384 \ - --source_image=projects/grpc-testing/global/images/xds-test-server-4 \ + --source_image=projects/grpc-testing/global/images/xds-test-server-5 \ --path_to_server_binary=/java_server/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/xds-test-server \ --gcp_suffix=$(date '+%s') \ --verbose \ @@ -42,4 +42,3 @@ GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info \ {fail_on_failed_rpc} \ {rpcs_to_send} \ {metadata_to_send}" - diff --git a/test/kokoro/xds_k8s_lb.cfg b/test/kokoro/xds_k8s_lb.cfg new file mode 100644 index 000000000000..5b989a6fe073 --- /dev/null +++ b/test/kokoro/xds_k8s_lb.cfg @@ -0,0 +1,13 @@ +# Config file for internal CI + +# Location of the continuous shell script in repository. +build_file: "grpc-go/test/kokoro/xds_k8s_lb.sh" +timeout_mins: 180 + +action { + define_artifacts { + regex: "artifacts/**/*sponge_log.xml" + regex: "artifacts/**/*.log" + strip_prefix: "artifacts" + } +} diff --git a/test/kokoro/xds_k8s_lb.sh b/test/kokoro/xds_k8s_lb.sh new file mode 100755 index 000000000000..5876e924fad6 --- /dev/null +++ b/test/kokoro/xds_k8s_lb.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash +# Copyright 2022 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +# Constants +readonly GITHUB_REPOSITORY_NAME="grpc-go" +readonly TEST_DRIVER_INSTALL_SCRIPT_URL="https://raw.githubusercontent.com/${TEST_DRIVER_REPO_OWNER:-grpc}/grpc/${TEST_DRIVER_BRANCH:-master}/tools/internal_ci/linux/grpc_xds_k8s_install_test_driver.sh" +## xDS test server/client Docker images +readonly SERVER_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-server" +readonly CLIENT_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-client" +readonly FORCE_IMAGE_BUILD="${FORCE_IMAGE_BUILD:-0}" + +####################################### +# Builds test app Docker images and pushes them to GCR +# Globals: +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# TESTING_VERSION: version branch under test, f.e. v1.42.x, master +# Arguments: +# None +# Outputs: +# Writes the output of `gcloud builds submit` to stdout, stderr +####################################### +build_test_app_docker_images() { + echo "Building Go xDS interop test app Docker images" + docker build -f "${SRC_DIR}/interop/xds/client/Dockerfile" -t "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" + docker build -f "${SRC_DIR}/interop/xds/server/Dockerfile" -t "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" + gcloud -q auth configure-docker + docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" + docker push "${SERVER_IMAGE_NAME}:${GIT_COMMIT}" + if is_version_branch "${TESTING_VERSION}"; then + tag_and_push_docker_image "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" "${TESTING_VERSION}" + tag_and_push_docker_image "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" "${TESTING_VERSION}" + fi +} + +####################################### +# Builds test app and its docker images unless they already exist +# Globals: +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker 
image name +# GIT_COMMIT: SHA-1 of git commit being built +# FORCE_IMAGE_BUILD +# Arguments: +# None +# Outputs: +# Writes the output to stdout, stderr +####################################### +build_docker_images_if_needed() { + # Check if images already exist + server_tags="$(gcloud_gcr_list_image_tags "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}")" + printf "Server image: %s:%s\n" "${SERVER_IMAGE_NAME}" "${GIT_COMMIT}" + echo "${server_tags:-Server image not found}" + + client_tags="$(gcloud_gcr_list_image_tags "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}")" + printf "Client image: %s:%s\n" "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" + echo "${client_tags:-Client image not found}" + + # Build if any of the images are missing, or FORCE_IMAGE_BUILD=1 + if [[ "${FORCE_IMAGE_BUILD}" == "1" || -z "${server_tags}" || -z "${client_tags}" ]]; then + build_test_app_docker_images + else + echo "Skipping Go test app build" + fi +} + +####################################### +# Executes the test case +# Globals: +# TEST_DRIVER_FLAGFILE: Relative path to test driver flagfile +# KUBE_CONTEXT: The name of kubectl context with GKE cluster access +# SECONDARY_KUBE_CONTEXT: The name of kubectl context with secondary GKE cluster access, if any +# TEST_XML_OUTPUT_DIR: Output directory for the test xUnit XML report +# SERVER_IMAGE_NAME: Test server Docker image name +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# TESTING_VERSION: version branch under test: used by the framework to determine the supported PSM +# features. 
+# Arguments: +# Test case name +# Outputs: +# Writes the output of test execution to stdout, stderr +# Test xUnit report to ${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml +####################################### +run_test() { + # Test driver usage: + # https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage + local test_name="${1:?Usage: run_test test_name}" + local out_dir="${TEST_XML_OUTPUT_DIR}/${test_name}" + mkdir -pv "${out_dir}" + set -x + python -m "tests.${test_name}" \ + --flagfile="${TEST_DRIVER_FLAGFILE}" \ + --kube_context="${KUBE_CONTEXT}" \ + --secondary_kube_context="${SECONDARY_KUBE_CONTEXT}" \ + --server_image="${SERVER_IMAGE_NAME}:${GIT_COMMIT}" \ + --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ + --testing_version="${TESTING_VERSION}" \ + --force_cleanup \ + --collect_app_logs \ + --log_dir="${out_dir}" \ + --xml_output_file="${out_dir}/sponge_log.xml" \ + |& tee "${out_dir}/sponge_log.log" +} + +####################################### +# Main function: provision software necessary to execute tests, and run them +# Globals: +# KOKORO_ARTIFACTS_DIR +# GITHUB_REPOSITORY_NAME +# SRC_DIR: Populated with absolute path to the source repo +# TEST_DRIVER_REPO_DIR: Populated with the path to the repo containing +# the test driver +# TEST_DRIVER_FULL_DIR: Populated with the path to the test driver source code +# TEST_DRIVER_FLAGFILE: Populated with relative path to test driver flagfile +# TEST_XML_OUTPUT_DIR: Populated with the path to test xUnit XML report +# GIT_ORIGIN_URL: Populated with the origin URL of git repo used for the build +# GIT_COMMIT: Populated with the SHA-1 of git commit being built +# GIT_COMMIT_SHORT: Populated with the short SHA-1 of git commit being built +# KUBE_CONTEXT: Populated with name of kubectl context with GKE cluster access +# Arguments: +# None +# Outputs: +# Writes the output of test execution to stdout, stderr +####################################### +main() { + 
local script_dir + script_dir="$(dirname "$0")" + + # Source the test driver from the master branch. + echo "Sourcing test driver install script from: ${TEST_DRIVER_INSTALL_SCRIPT_URL}" + source /dev/stdin <<< "$(curl -s "${TEST_DRIVER_INSTALL_SCRIPT_URL}")" + + activate_gke_cluster GKE_CLUSTER_PSM_LB + activate_secondary_gke_cluster GKE_CLUSTER_PSM_LB + + set -x + if [[ -n "${KOKORO_ARTIFACTS_DIR}" ]]; then + kokoro_setup_test_driver "${GITHUB_REPOSITORY_NAME}" + else + local_setup_test_driver "${script_dir}" + fi + build_docker_images_if_needed + # Run tests + cd "${TEST_DRIVER_FULL_DIR}" + local failed_tests=0 + test_suites=( + "affinity_test" + "api_listener_test" + "change_backend_service_test" + "custom_lb_test" + "failover_test" + "outlier_detection_test" + "remove_neg_test" + "round_robin_test" + ) + for test in "${test_suites[@]}"; do + run_test $test || (( ++failed_tests )) + done + echo "Failed test suites: ${failed_tests}" +} + +main "$@" diff --git a/test/kokoro/xds_url_map.cfg b/test/kokoro/xds_url_map.cfg new file mode 100644 index 000000000000..49ebc48e93c6 --- /dev/null +++ b/test/kokoro/xds_url_map.cfg @@ -0,0 +1,13 @@ +# Config file for internal CI + +# Location of the continuous shell script in repository. +build_file: "grpc-go/test/kokoro/xds_url_map.sh" +timeout_mins: 60 + +action { + define_artifacts { + regex: "artifacts/**/*sponge_log.xml" + regex: "artifacts/**/*.log" + strip_prefix: "artifacts" + } +} diff --git a/test/kokoro/xds_url_map.sh b/test/kokoro/xds_url_map.sh new file mode 100755 index 000000000000..a571ea1f00ff --- /dev/null +++ b/test/kokoro/xds_url_map.sh @@ -0,0 +1,147 @@ +#!/usr/bin/env bash +# Copyright 2021 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +# Constants +readonly GITHUB_REPOSITORY_NAME="grpc-go" +readonly TEST_DRIVER_INSTALL_SCRIPT_URL="https://raw.githubusercontent.com/${TEST_DRIVER_REPO_OWNER:-grpc}/grpc/${TEST_DRIVER_BRANCH:-master}/tools/internal_ci/linux/grpc_xds_k8s_install_test_driver.sh" +## xDS test client Docker images +readonly CLIENT_IMAGE_NAME="gcr.io/grpc-testing/xds-interop/go-client" +readonly FORCE_IMAGE_BUILD="${FORCE_IMAGE_BUILD:-0}" + +####################################### +# Builds test app Docker images and pushes them to GCR +# Globals: +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# Arguments: +# None +# Outputs: +# Writes the output of `gcloud builds submit` to stdout, stderr +####################################### +build_test_app_docker_images() { + echo "Building Go xDS interop test app Docker images" + docker build -f "${SRC_DIR}/interop/xds/client/Dockerfile" -t "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" "${SRC_DIR}" + gcloud -q auth configure-docker + docker push "${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" + if is_version_branch "${TESTING_VERSION}"; then + tag_and_push_docker_image "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" "${TESTING_VERSION}" + fi +} + +####################################### +# Builds test app and its docker images unless they already exist +# Globals: +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# FORCE_IMAGE_BUILD +# Arguments: +# None +# Outputs: +# Writes the output to stdout, stderr +####################################### 
+build_docker_images_if_needed() { + # Check if images already exist + client_tags="$(gcloud_gcr_list_image_tags "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}")" + printf "Client image: %s:%s\n" "${CLIENT_IMAGE_NAME}" "${GIT_COMMIT}" + echo "${client_tags:-Client image not found}" + + # Build if any of the images are missing, or FORCE_IMAGE_BUILD=1 + if [[ "${FORCE_IMAGE_BUILD}" == "1" || -z "${client_tags}" ]]; then + build_test_app_docker_images + else + echo "Skipping Go test app build" + fi +} + +####################################### +# Executes the test case +# Globals: +# TEST_DRIVER_FLAGFILE: Relative path to test driver flagfile +# KUBE_CONTEXT: The name of kubectl context with GKE cluster access +# TEST_XML_OUTPUT_DIR: Output directory for the test xUnit XML report +# CLIENT_IMAGE_NAME: Test client Docker image name +# GIT_COMMIT: SHA-1 of git commit being built +# TESTING_VERSION: version branch under test: used by the framework to determine the supported PSM +# features. +# Arguments: +# Test case name +# Outputs: +# Writes the output of test execution to stdout, stderr +# Test xUnit report to ${TEST_XML_OUTPUT_DIR}/${test_name}/sponge_log.xml +####################################### +run_test() { + # Test driver usage: + # https://github.com/grpc/grpc/tree/master/tools/run_tests/xds_k8s_test_driver#basic-usage + local test_name="${1:?Usage: run_test test_name}" + local out_dir="${TEST_XML_OUTPUT_DIR}/${test_name}" + mkdir -pv "${out_dir}" + set -x + python -m "tests.${test_name}" \ + --flagfile="${TEST_DRIVER_FLAGFILE}" \ + --flagfile="config/url-map.cfg" \ + --kube_context="${KUBE_CONTEXT}" \ + --client_image="${CLIENT_IMAGE_NAME}:${GIT_COMMIT}" \ + --testing_version="${TESTING_VERSION}" \ + --collect_app_logs \ + --log_dir="${out_dir}" \ + --xml_output_file="${out_dir}/sponge_log.xml" \ + |& tee "${out_dir}/sponge_log.log" +} + +####################################### +# Main function: provision software necessary to execute tests, and run 
them +# Globals: +# KOKORO_ARTIFACTS_DIR +# GITHUB_REPOSITORY_NAME +# SRC_DIR: Populated with absolute path to the source repo +# TEST_DRIVER_REPO_DIR: Populated with the path to the repo containing +# the test driver +# TEST_DRIVER_FULL_DIR: Populated with the path to the test driver source code +# TEST_DRIVER_FLAGFILE: Populated with relative path to test driver flagfile +# TEST_XML_OUTPUT_DIR: Populated with the path to test xUnit XML report +# GIT_ORIGIN_URL: Populated with the origin URL of git repo used for the build +# GIT_COMMIT: Populated with the SHA-1 of git commit being built +# GIT_COMMIT_SHORT: Populated with the short SHA-1 of git commit being built +# KUBE_CONTEXT: Populated with name of kubectl context with GKE cluster access +# Arguments: +# None +# Outputs: +# Writes the output of test execution to stdout, stderr +####################################### +main() { + local script_dir + script_dir="$(dirname "$0")" + + # Source the test driver from the master branch. + echo "Sourcing test driver install script from: ${TEST_DRIVER_INSTALL_SCRIPT_URL}" + source /dev/stdin <<< "$(curl -s "${TEST_DRIVER_INSTALL_SCRIPT_URL}")" + + activate_gke_cluster GKE_CLUSTER_PSM_BASIC + + set -x + if [[ -n "${KOKORO_ARTIFACTS_DIR}" ]]; then + kokoro_setup_test_driver "${GITHUB_REPOSITORY_NAME}" + else + local_setup_test_driver "${script_dir}" + fi + build_docker_images_if_needed + # Run tests + cd "${TEST_DRIVER_FULL_DIR}" + run_test url_map || echo "Failed url_map test" +} + +main "$@" diff --git a/test/kokoro/xds_v3.cfg b/test/kokoro/xds_v3.cfg index c4c8aad9e6f2..1991efd325d3 100644 --- a/test/kokoro/xds_v3.cfg +++ b/test/kokoro/xds_v3.cfg @@ -2,7 +2,7 @@ # Location of the continuous shell script in repository. 
build_file: "grpc-go/test/kokoro/xds_v3.sh" -timeout_mins: 120 +timeout_mins: 360 action { define_artifacts { regex: "**/*sponge_log.*" diff --git a/test/local_creds_test.go b/test/local_creds_test.go index 3933bb39635b..b1cabdbb7e56 100644 --- a/test/local_creds_test.go +++ b/test/local_creds_test.go @@ -29,12 +29,14 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/local" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func testLocalCredsE2ESucceed(network, address string) error { @@ -72,7 +74,7 @@ func testLocalCredsE2ESucceed(network, address string) error { s := grpc.NewServer(sopts...) defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen(network, address) if err != nil { @@ -100,7 +102,7 @@ func testLocalCredsE2ESucceed(network, address string) error { } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -170,7 +172,7 @@ func testLocalCredsE2EFail(dopts []grpc.DialOption) error { s := grpc.NewServer(sopts...) 
defer s.Stop() - testpb.RegisterTestServiceServer(s, ss) + testgrpc.RegisterTestServiceServer(s, ss) lis, err := net.Listen("tcp", "localhost:0") if err != nil { @@ -195,7 +197,7 @@ func testLocalCredsE2EFail(dopts []grpc.DialOption) error { } defer cc.Close() - c := testpb.NewTestServiceClient(cc) + c := testgrpc.NewTestServiceClient(cc) ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -218,7 +220,7 @@ func (s) TestLocalCredsClientFail(t *testing.T) { func (s) TestLocalCredsServerFail(t *testing.T) { // Use insecure at client-side which should lead to server-side failure. - opts := []grpc.DialOption{grpc.WithInsecure()} + opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())} if err := testLocalCredsE2EFail(opts); status.Code(err) != codes.Unavailable { t.Fatalf("testLocalCredsE2EFail() = %v; want %v", err, codes.Unavailable) } diff --git a/test/metadata_test.go b/test/metadata_test.go new file mode 100644 index 000000000000..e05d0172eaad --- /dev/null +++ b/test/metadata_test.go @@ -0,0 +1,156 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +func (s) TestInvalidMetadata(t *testing.T) { + grpctest.TLogger.ExpectErrorN("stream: failed to validate md when setting trailer", 5) + + tests := []struct { + name string + md metadata.MD + appendMD []string + want error + recv error + }{ + { + name: "invalid key", + md: map[string][]string{string(rune(0x19)): {"testVal"}}, + want: status.Error(codes.Internal, "header key \"\\x19\" contains illegal characters not in [0-9a-z-_.]"), + recv: status.Error(codes.Internal, "invalid header field"), + }, + { + name: "invalid value", + md: map[string][]string{"test": {string(rune(0x19))}}, + want: status.Error(codes.Internal, "header key \"test\" contains value with non-printable ASCII characters"), + recv: status.Error(codes.Internal, "invalid header field"), + }, + { + name: "invalid appended value", + md: map[string][]string{"test": {"test"}}, + appendMD: []string{"/", "value"}, + want: status.Error(codes.Internal, "header key \"/\" contains illegal characters not in [0-9a-z-_.]"), + recv: status.Error(codes.Internal, "invalid header field"), + }, + { + name: "empty appended key", + md: map[string][]string{"test": {"test"}}, + appendMD: []string{"", "value"}, + want: status.Error(codes.Internal, "there is an empty key in the header"), + recv: status.Error(codes.Internal, "invalid header field"), + }, + { + name: "empty key", + md: map[string][]string{"": {"test"}}, + want: status.Error(codes.Internal, "there is an empty key in the header"), + recv: status.Error(codes.Internal, "invalid header field"), + }, + { + name: "-bin key with arbitrary value", + 
md: map[string][]string{"test-bin": {string(rune(0x19))}}, + want: nil, + recv: io.EOF, + }, + { + name: "valid key and value", + md: map[string][]string{"test": {"value"}}, + want: nil, + recv: io.EOF, + }, + } + + testNum := 0 + ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + _, err := stream.Recv() + if err != nil { + return err + } + test := tests[testNum] + testNum++ + // merge original md and added md. + md := metadata.Join(test.md, metadata.Pairs(test.appendMD...)) + + if err := stream.SetHeader(md); !reflect.DeepEqual(test.want, err) { + return fmt.Errorf("call stream.SendHeader(md) validate metadata which is %v got err :%v, want err :%v", md, err, test.want) + } + if err := stream.SendHeader(md); !reflect.DeepEqual(test.want, err) { + return fmt.Errorf("call stream.SendHeader(md) validate metadata which is %v got err :%v, want err :%v", md, err, test.want) + } + stream.SetTrailer(md) + return nil + }, + } + if err := ss.Start(nil); err != nil { + t.Fatalf("Error starting ss endpoint server: %v", err) + } + defer ss.Stop() + + for _, test := range tests { + t.Run("unary "+test.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + ctx = metadata.NewOutgoingContext(ctx, test.md) + ctx = metadata.AppendToOutgoingContext(ctx, test.appendMD...) 
+ if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); !reflect.DeepEqual(test.want, err) { + t.Errorf("call ss.Client.EmptyCall() validate metadata which is %v got err :%v, want err :%v", test.md, err, test.want) + } + }) + } + + // call the stream server's api to drive the server-side unit testing + for _, test := range tests { + t.Run("streaming "+test.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Errorf("call ss.Client.FullDuplexCall(context.Background()) will success but got err :%v", err) + return + } + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Errorf("call ss.Client stream Send(nil) will success but got err :%v", err) + } + if _, err := stream.Recv(); status.Code(err) != status.Code(test.recv) || !strings.Contains(err.Error(), test.recv.Error()) { + t.Errorf("stream.Recv() = _, get err :%v, want err :%v", err, test.recv) + } + }) + } +} diff --git a/test/parse_config.go b/test/parse_config.go new file mode 100644 index 000000000000..f375a3aa8a18 --- /dev/null +++ b/test/parse_config.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "testing" + + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" +) + +// parseServiceConfig is a test helper which uses the manual resolver to parse +// the given service config. It calls t.Fatal() if service config parsing fails. +func parseServiceConfig(t *testing.T, r *manual.Resolver, sc string) *serviceconfig.ParseResult { + t.Helper() + + scpr := r.CC.ParseServiceConfig(sc) + if scpr.Err != nil { + t.Fatalf("Failed to parse service config %q: %v", sc, scpr.Err) + } + return scpr +} diff --git a/test/pickfirst_test.go b/test/pickfirst_test.go new file mode 100644 index 000000000000..d41786a556a3 --- /dev/null +++ b/test/pickfirst_test.go @@ -0,0 +1,687 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "fmt" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/pickfirst" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +const pickFirstServiceConfig = `{"loadBalancingConfig": [{"pick_first":{}}]}` + +// setupPickFirst performs steps required for pick_first tests. It starts a +// bunch of backends exporting the TestService, creates a ClientConn to them +// with service config specifying the use of the pick_first LB policy. +func setupPickFirst(t *testing.T, backendCount int, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, []*stubserver.StubServer) { + t.Helper() + + // Initialize channelz. Used to determine pending RPC count. 
+ czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + r := manual.NewBuilderWithScheme("whatever") + + backends := make([]*stubserver.StubServer, backendCount) + addrs := make([]resolver.Address, backendCount) + for i := 0; i < backendCount; i++ { + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + t.Cleanup(func() { backend.Stop() }) + + backends[i] = backend + addrs[i] = resolver.Address{Addr: backend.Address} + } + + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(pickFirstServiceConfig), + } + dopts = append(dopts, opts...) + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // At this point, the resolver has not returned any addresses to the channel. + // This RPC must block until the context expires. + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + return cc, r, backends +} + +// stubBackendsToResolverAddrs converts from a set of stub server backends to +// resolver addresses. Useful when pushing addresses to the manual resolver. 
+func stubBackendsToResolverAddrs(backends []*stubserver.StubServer) []resolver.Address { + addrs := make([]resolver.Address, len(backends)) + for i, backend := range backends { + addrs[i] = resolver.Address{Addr: backend.Address} + } + return addrs +} + +// TestPickFirst_OneBackend tests the most basic scenario for pick_first. It +// brings up a single backend and verifies that all RPCs get routed to it. +func (s) TestPickFirst_OneBackend(t *testing.T) { + cc, r, backends := setupPickFirst(t, 1) + + addrs := stubBackendsToResolverAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } +} + +// TestPickFirst_MultipleBackends tests the scenario with multiple backends and +// verifies that all RPCs get routed to the first one. +func (s) TestPickFirst_MultipleBackends(t *testing.T) { + cc, r, backends := setupPickFirst(t, 2) + + addrs := stubBackendsToResolverAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } +} + +// TestPickFirst_OneServerDown tests the scenario where we have multiple +// backends and pick_first is working as expected. Verifies that RPCs get routed +// to the next backend in the list when the first one goes down. +func (s) TestPickFirst_OneServerDown(t *testing.T) { + cc, r, backends := setupPickFirst(t, 2) + + addrs := stubBackendsToResolverAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Stop the backend which is currently being used. 
RPCs should get routed to + // the next backend in the list. + backends[0].Stop() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } +} + +// TestPickFirst_AllServersDown tests the scenario where we have multiple +// backends and pick_first is working as expected. When all backends go down, +// the test verifies that RPCs fail with appropriate status code. +func (s) TestPickFirst_AllServersDown(t *testing.T) { + cc, r, backends := setupPickFirst(t, 2) + + addrs := stubBackendsToResolverAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + for _, b := range backends { + b.Stop() + } + + client := testgrpc.NewTestServiceClient(cc) + for { + if ctx.Err() != nil { + t.Fatalf("channel failed to move to Unavailable after all backends were stopped: %v", ctx.Err()) + } + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) == codes.Unavailable { + return + } + time.Sleep(defaultTestShortTimeout) + } +} + +// TestPickFirst_AddressesRemoved tests the scenario where we have multiple +// backends and pick_first is working as expected. It then verifies that when +// addresses are removed by the name resolver, RPCs get routed appropriately. +func (s) TestPickFirst_AddressesRemoved(t *testing.T) { + cc, r, backends := setupPickFirst(t, 3) + + addrs := stubBackendsToResolverAddrs(backends) + r.UpdateState(resolver.State{Addresses: addrs}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Remove the first backend from the list of addresses originally pushed. + // RPCs should get routed to the first backend in the new list. 
+	r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[1], addrs[2]}})
+	if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil {
+		t.Fatal(err)
+	}
+
+	// Append the backend that we just removed to the end of the list.
+	// Nothing should change.
+	r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[1], addrs[2], addrs[0]}})
+	if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil {
+		t.Fatal(err)
+	}
+
+	// Remove the first backend from the existing list of addresses.
+	// RPCs should get routed to the first backend in the new list.
+	r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[2], addrs[0]}})
+	if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[2]); err != nil {
+		t.Fatal(err)
+	}
+
+	// Remove the first backend from the existing list of addresses.
+	// RPCs should get routed to the first backend in the new list.
+	r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0]}})
+	if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestPickFirst_NewAddressWhileBlocking tests the case where pick_first is
+// configured on a channel, things are working as expected and then a resolver
+// update removes all addresses. An RPC attempted at this point in time will be
+// blocked because there are no valid backends. This test verifies that when new
+// backends are added, the RPC is able to complete.
+func (s) TestPickFirst_NewAddressWhileBlocking(t *testing.T) {
+	cc, r, backends := setupPickFirst(t, 2)
+	addrs := stubBackendsToResolverAddrs(backends)
+	r.UpdateState(resolver.State{Addresses: addrs})
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil {
+		t.Fatal(err)
+	}
+
+	// Send a resolver update with no addresses. This should push the channel into
+	// TransientFailure.
+ r.UpdateState(resolver.State{}) + awaitState(ctx, t, cc, connectivity.TransientFailure) + + doneCh := make(chan struct{}) + client := testgrpc.NewTestServiceClient(cc) + go func() { + // The channel is currently in TransientFailure and this RPC will block + // until the channel becomes Ready, which will only happen when we push a + // resolver update with a valid backend address. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Errorf("EmptyCall() = %v, want ", err) + } + close(doneCh) + }() + + // Make sure that there is one pending RPC on the ClientConn before attempting + // to push new addresses through the name resolver. If we don't do this, the + // resolver update can happen before the above goroutine gets to make the RPC. + for { + if err := ctx.Err(); err != nil { + t.Fatal(err) + } + tcs, _ := channelz.GetTopChannels(0, 0) + if len(tcs) != 1 { + t.Fatalf("there should only be one top channel, not %d", len(tcs)) + } + started := tcs[0].ChannelData.CallsStarted + completed := tcs[0].ChannelData.CallsSucceeded + tcs[0].ChannelData.CallsFailed + if (started - completed) == 1 { + break + } + time.Sleep(defaultTestShortTimeout) + } + + // Send a resolver update with a valid backend to push the channel to Ready + // and unblock the above RPC. + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backends[0].Address}}}) + + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for blocked RPC to complete") + case <-doneCh: + } +} + +// TestPickFirst_StickyTransientFailure tests the case where pick_first is +// configured on a channel, and the backend is configured to close incoming +// connections as soon as they are accepted. The test verifies that the channel +// enters TransientFailure and stays there. The test also verifies that the +// pick_first LB policy is constantly trying to reconnect to the backend. 
+func (s) TestPickFirst_StickyTransientFailure(t *testing.T) {
+	// Spin up a local server which closes the connection as soon as it receives
+	// one. It also sends a signal on a channel whenever it receives a connection.
+	lis, err := testutils.LocalTCPListener()
+	if err != nil {
+		t.Fatalf("Failed to create listener: %v", err)
+	}
+	t.Cleanup(func() { lis.Close() })
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	connCh := make(chan struct{}, 1)
+	go func() {
+		for {
+			conn, err := lis.Accept()
+			if err != nil {
+				return
+			}
+			select {
+			case connCh <- struct{}{}:
+				conn.Close()
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	// Dial the above server with a ConnectParams that does a constant backoff
+	// of defaultTestShortTimeout duration.
+	dopts := []grpc.DialOption{
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+		grpc.WithDefaultServiceConfig(pickFirstServiceConfig),
+		grpc.WithConnectParams(grpc.ConnectParams{
+			Backoff: backoff.Config{
+				BaseDelay:  defaultTestShortTimeout,
+				Multiplier: float64(0),
+				Jitter:     float64(0),
+				MaxDelay:   defaultTestShortTimeout,
+			},
+		}),
+	}
+	cc, err := grpc.Dial(lis.Addr().String(), dopts...)
+	if err != nil {
+		t.Fatalf("Failed to dial server at %q: %v", lis.Addr(), err)
+	}
+	t.Cleanup(func() { cc.Close() })
+
+	awaitState(ctx, t, cc, connectivity.TransientFailure)
+
+	// Spawn a goroutine to ensure that the channel stays in TransientFailure.
+	// The call to cc.WaitForStateChange will return false when the main
+	// goroutine exits and the context is cancelled.
+	go func() {
+		if cc.WaitForStateChange(ctx, connectivity.TransientFailure) {
+			if state := cc.GetState(); state != connectivity.Shutdown {
+				t.Errorf("Unexpected state change from TransientFailure to %s", cc.GetState())
+			}
+		}
+	}()
+
+	// Ensures that the pick_first LB policy is constantly trying to reconnect.
+ for i := 0; i < 10; i++ { + select { + case <-connCh: + case <-time.After(2 * defaultTestShortTimeout): + t.Error("Timeout when waiting for pick_first to reconnect") + } + } +} + +// Tests the PF LB policy with shuffling enabled. +func (s) TestPickFirst_ShuffleAddressList(t *testing.T) { + defer func(old bool) { envconfig.PickFirstLBConfig = old }(envconfig.PickFirstLBConfig) + envconfig.PickFirstLBConfig = true + const serviceConfig = `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": true }}]}` + + // Install a shuffler that always reverses two entries. + origShuf := grpcrand.Shuffle + defer func() { grpcrand.Shuffle = origShuf }() + grpcrand.Shuffle = func(n int, f func(int, int)) { + if n != 2 { + t.Errorf("Shuffle called with n=%v; want 2", n) + return + } + f(0, 1) // reverse the two addresses + } + + // Set up our backends. + cc, r, backends := setupPickFirst(t, 2) + addrs := stubBackendsToResolverAddrs(backends) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Push an update with both addresses and shuffling disabled. We should + // connect to backend 0. + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrs[0], addrs[1]}}) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Send a config with shuffling enabled. This will reverse the addresses, + // but the channel should still be connected to backend 0. + shufState := resolver.State{ + ServiceConfig: parseServiceConfig(t, r, serviceConfig), + Addresses: []resolver.Address{addrs[0], addrs[1]}, + } + r.UpdateState(shufState) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Send a resolver update with no addresses. This should push the channel + // into TransientFailure. + r.UpdateState(resolver.State{}) + awaitState(ctx, t, cc, connectivity.TransientFailure) + + // Send the same config as last time with shuffling enabled. 
Since we are + // not connected to backend 0, we should connect to backend 1. + r.UpdateState(shufState) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } +} + +// Tests the PF LB policy with the environment variable support of address list +// shuffling disabled. +func (s) TestPickFirst_ShuffleAddressListDisabled(t *testing.T) { + defer func(old bool) { envconfig.PickFirstLBConfig = old }(envconfig.PickFirstLBConfig) + envconfig.PickFirstLBConfig = false + const serviceConfig = `{"loadBalancingConfig": [{"pick_first":{ "shuffleAddressList": true }}]}` + + // Install a shuffler that always reverses two entries. + origShuf := grpcrand.Shuffle + defer func() { grpcrand.Shuffle = origShuf }() + grpcrand.Shuffle = func(n int, f func(int, int)) { + if n != 2 { + t.Errorf("Shuffle called with n=%v; want 2", n) + return + } + f(0, 1) // reverse the two addresses + } + + // Set up our backends. + cc, r, backends := setupPickFirst(t, 2) + addrs := stubBackendsToResolverAddrs(backends) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Send a config with shuffling enabled. This will reverse the addresses, + // so we should connect to backend 1 if shuffling is supported. However + // with it disabled at the start of the test, we will connect to backend 0 + // instead. + shufState := resolver.State{ + ServiceConfig: parseServiceConfig(t, r, serviceConfig), + Addresses: []resolver.Address{addrs[0], addrs[1]}, + } + r.UpdateState(shufState) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } +} + +// setupPickFirstWithListenerWrapper is very similar to setupPickFirst, but uses +// a wrapped listener that the test can use to track accepted connections. 
+func setupPickFirstWithListenerWrapper(t *testing.T, backendCount int, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, []*stubserver.StubServer, []*testutils.ListenerWrapper) { + t.Helper() + + // Initialize channelz. Used to determine pending RPC count. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + backends := make([]*stubserver.StubServer, backendCount) + addrs := make([]resolver.Address, backendCount) + listeners := make([]*testutils.ListenerWrapper, backendCount) + for i := 0; i < backendCount; i++ { + lis := testutils.NewListenerWrapper(t, nil) + backend := &stubserver.StubServer{ + Listener: lis, + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + t.Cleanup(func() { backend.Stop() }) + + backends[i] = backend + addrs[i] = resolver.Address{Addr: backend.Address} + listeners[i] = lis + } + + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(pickFirstServiceConfig), + } + dopts = append(dopts, opts...) + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + // At this point, the resolver has not returned any addresses to the channel. + // This RPC must block until the context expires. 
+ sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + return cc, r, backends, listeners +} + +// TestPickFirst_AddressUpdateWithAttributes tests the case where an address +// update received by the pick_first LB policy differs in attributes. Addresses +// which differ in attributes are considered different from the perspective of +// subconn creation and connection establishment and the test verifies that new +// connections are created when attributes change. +func (s) TestPickFirst_AddressUpdateWithAttributes(t *testing.T) { + cc, r, backends, listeners := setupPickFirstWithListenerWrapper(t, 2) + + // Add a set of attributes to the addresses before pushing them to the + // pick_first LB policy through the manual resolver. + addrs := stubBackendsToResolverAddrs(backends) + for i := range addrs { + addrs[i].Attributes = addrs[i].Attributes.WithValue("test-attribute-1", fmt.Sprintf("%d", i)) + } + r.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that RPCs succeed to the first backend in the list. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Grab the wrapped connection from the listener wrapper. This will be used + // to verify the connection is closed. + val, err := listeners[0].NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failed to receive new connection from wrapped listener: %v", err) + } + conn := val.(*testutils.ConnWrapper) + + // Add another set of attributes to the addresses, and push them to the + // pick_first LB policy through the manual resolver. Leave the order of the + // addresses unchanged. 
+ for i := range addrs { + addrs[i].Attributes = addrs[i].Attributes.WithValue("test-attribute-2", fmt.Sprintf("%d", i)) + } + r.UpdateState(resolver.State{Addresses: addrs}) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // A change in the address attributes results in the new address being + // considered different to the current address. This will result in the old + // connection being closed and a new connection to the same backend (since + // address order is not modified). + if _, err := conn.CloseCh.Receive(ctx); err != nil { + t.Fatalf("Timeout when expecting existing connection to be closed: %v", err) + } + val, err = listeners[0].NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failed to receive new connection from wrapped listener: %v", err) + } + conn = val.(*testutils.ConnWrapper) + + // Add another set of attributes to the addresses, and push them to the + // pick_first LB policy through the manual resolver. Reverse of the order + // of addresses. + for i := range addrs { + addrs[i].Attributes = addrs[i].Attributes.WithValue("test-attribute-3", fmt.Sprintf("%d", i)) + } + addrs[0], addrs[1] = addrs[1], addrs[0] + r.UpdateState(resolver.State{Addresses: addrs}) + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Ensure that the old connection is closed and a new connection is + // established to the first address in the new list. + if _, err := conn.CloseCh.Receive(ctx); err != nil { + t.Fatalf("Timeout when expecting existing connection to be closed: %v", err) + } + _, err = listeners[1].NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failed to receive new connection from wrapped listener: %v", err) + } +} + +// TestPickFirst_AddressUpdateWithBalancerAttributes tests the case where an +// address update received by the pick_first LB policy differs in balancer +// attributes, which are meant only for consumption by LB policies. 
In this +// case, the test verifies that new connections are not created when the address +// update only changes the balancer attributes. +func (s) TestPickFirst_AddressUpdateWithBalancerAttributes(t *testing.T) { + cc, r, backends, listeners := setupPickFirstWithListenerWrapper(t, 2) + + // Add a set of balancer attributes to the addresses before pushing them to + // the pick_first LB policy through the manual resolver. + addrs := stubBackendsToResolverAddrs(backends) + for i := range addrs { + addrs[i].BalancerAttributes = addrs[i].BalancerAttributes.WithValue("test-attribute-1", fmt.Sprintf("%d", i)) + } + r.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that RPCs succeed to the expected backend. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Grab the wrapped connection from the listener wrapper. This will be used + // to verify the connection is not closed. + val, err := listeners[0].NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Failed to receive new connection from wrapped listener: %v", err) + } + conn := val.(*testutils.ConnWrapper) + + // Add a set of balancer attributes to the addresses before pushing them to + // the pick_first LB policy through the manual resolver. Leave the order of + // the addresses unchanged. + for i := range addrs { + addrs[i].BalancerAttributes = addrs[i].BalancerAttributes.WithValue("test-attribute-2", fmt.Sprintf("%d", i)) + } + r.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that no new connection is established, and ensure that the old + // connection is not closed. 
+ for i := range listeners { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := listeners[i].NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("Unexpected error when expecting no new connection: %v", err) + } + } + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := conn.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("Unexpected error when expecting existing connection to stay active: %v", err) + } + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[0]); err != nil { + t.Fatal(err) + } + + // Add a set of balancer attributes to the addresses before pushing them to + // the pick_first LB policy through the manual resolver. Reverse of the + // order of addresses. + for i := range addrs { + addrs[i].BalancerAttributes = addrs[i].BalancerAttributes.WithValue("test-attribute-3", fmt.Sprintf("%d", i)) + } + addrs[0], addrs[1] = addrs[1], addrs[0] + r.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that no new connection is established, and ensure that the old + // connection is not closed. 
+ for i := range listeners { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := listeners[i].NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("Unexpected error when expecting no new connection: %v", err) + } + } + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := conn.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("Unexpected error when expecting existing connection to stay active: %v", err) + } + if err := pickfirst.CheckRPCsToBackend(ctx, cc, addrs[1]); err != nil { + t.Fatal(err) + } +} diff --git a/test/race.go b/test/race_test.go similarity index 97% rename from test/race.go rename to test/race_test.go index acfa0dfae37c..d99f0a410ac6 100644 --- a/test/race.go +++ b/test/race_test.go @@ -1,3 +1,4 @@ +//go:build race // +build race /* diff --git a/test/recv_buffer_pool_test.go b/test/recv_buffer_pool_test.go new file mode 100644 index 000000000000..8bb6db4a77af --- /dev/null +++ b/test/recv_buffer_pool_test.go @@ -0,0 +1,90 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "bytes" + "context" + "io" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/internal/stubserver" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +func (s) TestRecvBufferPool(t *testing.T) { + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + for i := 0; i < 10; i++ { + preparedMsg := &grpc.PreparedMsg{} + err := preparedMsg.Encode(stream, &testpb.StreamingOutputCallResponse{ + Payload: &testpb.Payload{ + Body: []byte{'0' + uint8(i)}, + }, + }) + if err != nil { + return err + } + stream.SendMsg(preparedMsg) + } + return nil + }, + } + if err := ss.Start( + []grpc.ServerOption{grpc.RecvBufferPool(grpc.NewSharedBufferPool())}, + grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()), + ); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + stream, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("ss.Client.FullDuplexCall failed: %f", err) + } + + var ngot int + var buf bytes.Buffer + for { + reply, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + ngot++ + if buf.Len() > 0 { + buf.WriteByte(',') + } + buf.Write(reply.GetPayload().GetBody()) + } + if want := 10; ngot != want { + t.Errorf("Got %d replies, want %d", ngot, want) + } + if got, want := buf.String(), "0,1,2,3,4,5,6,7,8,9"; got != want { + t.Errorf("Got replies %q; want %q", got, want) + } +} diff --git a/test/resolver_update_test.go b/test/resolver_update_test.go new file mode 100644 index 000000000000..416f7175c53a --- /dev/null +++ b/test/resolver_update_test.go @@ -0,0 +1,263 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// TestResolverUpdateDuringBuild_ServiceConfigParseError makes the +// resolver.Builder call into the ClientConn, during the Build call, with a +// service config parsing error. +// +// We use two separate mutexes in the code which make sure there is no data race +// in this code path, and also that there is no deadlock. +func (s) TestResolverUpdateDuringBuild_ServiceConfigParseError(t *testing.T) { + // Setting InitialState on the manual resolver makes it call into the + // ClientConn during the Build call. 
+ r := manual.NewBuilderWithScheme("whatever") + r.InitialState(resolver.State{ServiceConfig: &serviceconfig.ParseResult{Err: errors.New("resolver build err")}}) + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + const wantMsg = "error parsing service config" + const wantCode = codes.Unavailable + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { + t.Fatalf("EmptyCall RPC failed: %v; want code: %v, want message: %q", err, wantCode, wantMsg) + } +} + +type fakeConfig struct { + serviceconfig.Config +} + +// TestResolverUpdateDuringBuild_ServiceConfigInvalidTypeError makes the +// resolver.Builder call into the ClientConn, during the Build call, with an +// invalid service config type. +// +// We use two separate mutexes in the code which make sure there is no data race +// in this code path, and also that there is no deadlock. +func (s) TestResolverUpdateDuringBuild_ServiceConfigInvalidTypeError(t *testing.T) { + // Setting InitialState on the manual resolver makes it call into the + // ClientConn during the Build call. 
+ r := manual.NewBuilderWithScheme("whatever") + r.InitialState(resolver.State{ServiceConfig: &serviceconfig.ParseResult{Config: fakeConfig{}}}) + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + const wantMsg = "illegal service config type" + const wantCode = codes.Unavailable + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { + t.Fatalf("EmptyCall RPC failed: %v; want code: %v, want message: %q", err, wantCode, wantMsg) + } +} + +// TestResolverUpdate_InvalidServiceConfigAsFirstUpdate makes the resolver send +// an update with an invalid service config as its first update. This should +// make the ClientConn apply the failing LB policy, and should result in RPC +// errors indicating the failing service config. 
+func (s) TestResolverUpdate_InvalidServiceConfigAsFirstUpdate(t *testing.T) { + r := manual.NewBuilderWithScheme("whatever") + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) + } + defer cc.Close() + + scpr := r.CC.ParseServiceConfig("bad json service config") + r.UpdateState(resolver.State{ServiceConfig: scpr}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + const wantMsg = "error parsing service config" + const wantCode = codes.Unavailable + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != wantCode || !strings.Contains(status.Convert(err).Message(), wantMsg) { + t.Fatalf("EmptyCall RPC failed: %v; want code: %v, want message: %q", err, wantCode, wantMsg) + } +} + +func verifyClientConnStateUpdate(got, want balancer.ClientConnState) error { + if got, want := got.ResolverState.Addresses, want.ResolverState.Addresses; !cmp.Equal(got, want) { + return fmt.Errorf("update got unexpected addresses: %v, want %v", got, want) + } + if got, want := got.ResolverState.ServiceConfig.Config, want.ResolverState.ServiceConfig.Config; !internal.EqualServiceConfigForTesting(got, want) { + return fmt.Errorf("received unexpected service config: \ngot: %v \nwant: %v", got, want) + } + if got, want := got.BalancerConfig, want.BalancerConfig; !cmp.Equal(got, want) { + return fmt.Errorf("received unexpected balancer config: \ngot: %v \nwant: %v", cmp.Diff(nil, got), cmp.Diff(nil, want)) + } + return nil +} + +// TestResolverUpdate_InvalidServiceConfigAfterGoodUpdate tests the scenario +// where the resolver sends an update with an invalid service config after +// having sent a good update. This should result in the ClientConn discarding +// the new invalid service config, and continuing to use the old good config. 
+func (s) TestResolverUpdate_InvalidServiceConfigAfterGoodUpdate(t *testing.T) { + type wrappingBalancerConfig struct { + serviceconfig.LoadBalancingConfig + Config string `json:"config,omitempty"` + } + + // Register a stub balancer which uses a "pick_first" balancer underneath and + // signals on a channel when it receives ClientConn updates. + ccUpdateCh := testutils.NewChannel() + stub.Register(t.Name(), stub.BalancerFuncs{ + Init: func(bd *stub.BalancerData) { + pf := balancer.Get(grpc.PickFirstBalancerName) + bd.Data = pf.Build(bd.ClientConn, bd.BuildOptions) + }, + ParseConfig: func(lbCfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &wrappingBalancerConfig{} + if err := json.Unmarshal(lbCfg, cfg); err != nil { + return nil, err + } + return cfg, nil + }, + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + if _, ok := ccs.BalancerConfig.(*wrappingBalancerConfig); !ok { + return fmt.Errorf("received balancer config of unsupported type %T", ccs.BalancerConfig) + } + bal := bd.Data.(balancer.Balancer) + ccUpdateCh.Send(ccs) + ccs.BalancerConfig = nil + return bal.UpdateClientConnState(ccs) + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + bal := bd.Data.(balancer.Balancer) + bal.UpdateSubConnState(sc, state) + }, + }) + + // Start a backend exposing the test service. 
+ backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + defer backend.Stop() + + r := manual.NewBuilderWithScheme("whatever") + + cc, err := grpc.Dial(r.Scheme()+":///test.server", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("Dial(_, _) = _, %v; want _, nil", err) + } + defer cc.Close() + + // Push a resolver update and verify that our balancer receives the update. + addrs := []resolver.Address{{Addr: backend.Address}} + const lbCfg = "wrapping balancer LB policy config" + goodSC := r.CC.ParseServiceConfig(fmt.Sprintf(` +{ + "loadBalancingConfig": [ + { + "%v": { + "config": "%s" + } + } + ] +}`, t.Name(), lbCfg)) + r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: goodSC}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantCCS := balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: addrs, + ServiceConfig: goodSC, + }, + BalancerConfig: &wrappingBalancerConfig{Config: lbCfg}, + } + ccs, err := ccUpdateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for ClientConnState update from grpc") + } + gotCCS := ccs.(balancer.ClientConnState) + if err := verifyClientConnStateUpdate(gotCCS, wantCCS); err != nil { + t.Fatal(err) + } + + // Ensure RPCs are successful. + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall RPC failed: %v", err) + } + + // Push a bad resolver update and ensure that the update is propagated to our + // stub balancer. 
But since the pushed update contains an invalid service + // config, our balancer should continue to see the old loadBalancingConfig. + badSC := r.CC.ParseServiceConfig("bad json service config") + wantCCS.ResolverState.ServiceConfig = badSC + r.UpdateState(resolver.State{Addresses: addrs, ServiceConfig: badSC}) + ccs, err = ccUpdateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for ClientConnState update from grpc") + } + gotCCS = ccs.(balancer.ClientConnState) + if err := verifyClientConnStateUpdate(gotCCS, wantCCS); err != nil { + t.Fatal(err) + } + + // RPCs should continue to be successful since the ClientConn is using the old + // good service config. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall RPC failed: %v", err) + } +} diff --git a/test/retry_test.go b/test/retry_test.go index f93c9ac053f7..06e2479ff67b 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -22,33 +22,33 @@ import ( "context" "fmt" "io" - "os" + "net" + "reflect" "strconv" "strings" + "sync" "testing" "time" "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" -) -func enableRetry() func() { - old := envconfig.Retry - envconfig.Retry = true - return func() { envconfig.Retry = old } -} + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) func (s) TestRetryUnary(t *testing.T) { - defer enableRetry()() i := -1 ss := &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { + EmptyCallF: func(context.Context, *testpb.Empty) (r *testpb.Empty, 
err error) { + defer func() { t.Logf("server call %v returning err %v", i, err) }() i++ switch i { case 0, 2, 5: @@ -59,11 +59,8 @@ func (s) TestRetryUnary(t *testing.T) { return nil, status.New(codes.AlreadyExists, "retryable error").Err() }, } - if err := ss.Start([]grpc.ServerOption{}); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - ss.NewServiceConfig(`{ + if err := ss.Start([]grpc.ServerOption{}, + grpc.WithDefaultServiceConfig(`{ "methodConfig": [{ "name": [{"service": "grpc.testing.TestService"}], "waitForReady": true, @@ -74,18 +71,10 @@ func (s) TestRetryUnary(t *testing.T) { "BackoffMultiplier": 1.0, "RetryableStatusCodes": [ "ALREADY_EXISTS" ] } - }]}`) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - for { - if ctx.Err() != nil { - t.Fatalf("Timed out waiting for service config update") - } - if ss.CC.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { - break - } - time.Sleep(time.Millisecond) + }]}`)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) } - cancel() + defer ss.Stop() testCases := []struct { code codes.Code @@ -99,69 +88,8 @@ func (s) TestRetryUnary(t *testing.T) { {codes.Internal, 11}, {codes.AlreadyExists, 15}, } - for _, tc := range testCases { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}) - cancel() - if status.Code(err) != tc.code { - t.Fatalf("EmptyCall(_, _) = _, %v; want _, ", err, tc.code) - } - if i != tc.count { - t.Fatalf("i = %v; want %v", i, tc.count) - } - } -} - -func (s) TestRetryDisabledByDefault(t *testing.T) { - if strings.EqualFold(os.Getenv("GRPC_GO_RETRY"), "on") { - return - } - i := -1 - ss := &stubserver.StubServer{ - EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { - i++ - switch i { - case 0: - return nil, status.New(codes.AlreadyExists, "retryable error").Err() - } - return &testpb.Empty{}, 
nil - }, - } - if err := ss.Start([]grpc.ServerOption{}); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - ss.NewServiceConfig(`{ - "methodConfig": [{ - "name": [{"service": "grpc.testing.TestService"}], - "waitForReady": true, - "retryPolicy": { - "MaxAttempts": 4, - "InitialBackoff": ".01s", - "MaxBackoff": ".01s", - "BackoffMultiplier": 1.0, - "RetryableStatusCodes": [ "ALREADY_EXISTS" ] - } - }]}`) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - for { - if ctx.Err() != nil { - t.Fatalf("Timed out waiting for service config update") - } - if ss.CC.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { - break - } - time.Sleep(time.Millisecond) - } - cancel() - - testCases := []struct { - code codes.Code - count int - }{ - {codes.AlreadyExists, 0}, - } - for _, tc := range testCases { + for num, tc := range testCases { + t.Log("Case", num) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}) cancel() @@ -175,7 +103,6 @@ func (s) TestRetryDisabledByDefault(t *testing.T) { } func (s) TestRetryThrottling(t *testing.T) { - defer enableRetry()() i := -1 ss := &stubserver.StubServer{ EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { @@ -187,11 +114,8 @@ func (s) TestRetryThrottling(t *testing.T) { return nil, status.New(codes.Unavailable, "retryable error").Err() }, } - if err := ss.Start([]grpc.ServerOption{}); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - ss.NewServiceConfig(`{ + if err := ss.Start([]grpc.ServerOption{}, + grpc.WithDefaultServiceConfig(`{ "methodConfig": [{ "name": [{"service": "grpc.testing.TestService"}], "waitForReady": true, @@ -207,18 +131,10 @@ func (s) TestRetryThrottling(t *testing.T) { "maxTokens": 10, "tokenRatio": 0.5 } - }`) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - for { - if 
ctx.Err() != nil { - t.Fatalf("Timed out waiting for service config update") - } - if ss.CC.GetMethodConfig("/grpc.testing.TestService/EmptyCall").WaitForReady != nil { - break - } - time.Sleep(time.Millisecond) + }`)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) } - cancel() + defer ss.Stop() testCases := []struct { code codes.Code @@ -251,7 +167,6 @@ func (s) TestRetryThrottling(t *testing.T) { } func (s) TestRetryStreaming(t *testing.T) { - defer enableRetry()() req := func(b byte) *testpb.StreamingOutputCallRequest { return &testpb.StreamingOutputCallRequest{Payload: &testpb.Payload{Body: []byte{b}}} } @@ -261,12 +176,12 @@ func (s) TestRetryStreaming(t *testing.T) { largePayload, _ := newPayload(testpb.PayloadType_COMPRESSABLE, 500) - type serverOp func(stream testpb.TestService_FullDuplexCallServer) error - type clientOp func(stream testpb.TestService_FullDuplexCallClient) error + type serverOp func(stream testgrpc.TestService_FullDuplexCallServer) error + type clientOp func(stream testgrpc.TestService_FullDuplexCallClient) error // Server Operations sAttempts := func(n int) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { const key = "grpc-previous-rpc-attempts" md, ok := metadata.FromIncomingContext(stream.Context()) if !ok { @@ -279,7 +194,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } sReq := func(b byte) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { want := req(b) if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) { return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want %v, ", got, err, want) @@ -288,7 +203,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } sReqPayload := func(p *testpb.Payload) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return 
func(stream testgrpc.TestService_FullDuplexCallServer) error { want := &testpb.StreamingOutputCallRequest{Payload: p} if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) { return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want %v, ", got, err, want) @@ -297,7 +212,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } sRes := func(b byte) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { msg := res(b) if err := stream.Send(msg); err != nil { return status.Errorf(codes.Internal, "server: Send(%v) = %v; want ", msg, err) @@ -306,12 +221,12 @@ func (s) TestRetryStreaming(t *testing.T) { } } sErr := func(c codes.Code) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { return status.New(c, "").Err() } } sCloseSend := func() serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { if msg, err := stream.Recv(); msg != nil || err != io.EOF { return status.Errorf(codes.Internal, "server: Recv() = %v, %v; want , io.EOF", msg, err) } @@ -319,7 +234,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } sPushback := func(s string) serverOp { - return func(stream testpb.TestService_FullDuplexCallServer) error { + return func(stream testgrpc.TestService_FullDuplexCallServer) error { stream.SetTrailer(metadata.MD{"grpc-retry-pushback-ms": []string{s}}) return nil } @@ -327,7 +242,7 @@ func (s) TestRetryStreaming(t *testing.T) { // Client Operations cReq := func(b byte) clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { msg := req(b) if err := stream.Send(msg); err != nil { return fmt.Errorf("client: Send(%v) = %v; want ", msg, err) @@ -336,7 +251,7 @@ func (s) 
TestRetryStreaming(t *testing.T) { } } cReqPayload := func(p *testpb.Payload) clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { msg := &testpb.StreamingOutputCallRequest{Payload: p} if err := stream.Send(msg); err != nil { return fmt.Errorf("client: Send(%v) = %v; want ", msg, err) @@ -345,7 +260,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } cRes := func(b byte) clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { want := res(b) if got, err := stream.Recv(); err != nil || !proto.Equal(got, want) { return fmt.Errorf("client: Recv() = %v, %v; want %v, ", got, err, want) @@ -354,7 +269,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } cErr := func(c codes.Code) clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { want := status.New(c, "").Err() if c == codes.OK { want = io.EOF @@ -369,7 +284,7 @@ func (s) TestRetryStreaming(t *testing.T) { } } cCloseSend := func() clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { if err := stream.CloseSend(); err != nil { return fmt.Errorf("client: CloseSend() = %v; want ", err) } @@ -378,13 +293,13 @@ func (s) TestRetryStreaming(t *testing.T) { } var curTime time.Time cGetTime := func() clientOp { - return func(_ testpb.TestService_FullDuplexCallClient) error { + return func(_ testgrpc.TestService_FullDuplexCallClient) error { curTime = time.Now() return nil } } cCheckElapsed := func(d time.Duration) clientOp { - return func(_ testpb.TestService_FullDuplexCallClient) error { + return func(_ testgrpc.TestService_FullDuplexCallClient) error { if elapsed := time.Since(curTime); elapsed < d { return fmt.Errorf("elapsed time: %v; 
want >= %v", elapsed, d) } @@ -392,13 +307,13 @@ func (s) TestRetryStreaming(t *testing.T) { } } cHdr := func() clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { _, err := stream.Header() return err } } cCtx := func() clientOp { - return func(stream testpb.TestService_FullDuplexCallClient) error { + return func(stream testgrpc.TestService_FullDuplexCallClient) error { stream.Context() return nil } @@ -487,7 +402,7 @@ func (s) TestRetryStreaming(t *testing.T) { var serverOpIter int var serverOps []serverOp ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { for serverOpIter < len(serverOps) { op := serverOps[serverOpIter] serverOpIter++ @@ -498,11 +413,8 @@ func (s) TestRetryStreaming(t *testing.T) { return nil }, } - if err := ss.Start([]grpc.ServerOption{}, grpc.WithDefaultCallOptions(grpc.MaxRetryRPCBufferSize(200))); err != nil { - t.Fatalf("Error starting endpoint server: %v", err) - } - defer ss.Stop() - ss.NewServiceConfig(`{ + if err := ss.Start([]grpc.ServerOption{}, grpc.WithDefaultCallOptions(grpc.MaxRetryRPCBufferSize(200)), + grpc.WithDefaultServiceConfig(`{ "methodConfig": [{ "name": [{"service": "grpc.testing.TestService"}], "waitForReady": true, @@ -513,7 +425,10 @@ func (s) TestRetryStreaming(t *testing.T) { "BackoffMultiplier": 1.0, "RetryableStatusCodes": [ "UNAVAILABLE" ] } - }]}`) + }]}`)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) for { if ctx.Err() != nil { @@ -549,3 +464,248 @@ func (s) TestRetryStreaming(t *testing.T) { }() } } + +type retryStatsHandler struct { + mu sync.Mutex + s []stats.RPCStats +} + +func (*retryStatsHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) 
context.Context { + return ctx +} +func (h *retryStatsHandler) HandleRPC(_ context.Context, s stats.RPCStats) { + h.mu.Lock() + h.s = append(h.s, s) + h.mu.Unlock() +} +func (*retryStatsHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { + return ctx +} +func (*retryStatsHandler) HandleConn(context.Context, stats.ConnStats) {} + +func (s) TestRetryStats(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen. Err: %v", err) + } + defer lis.Close() + server := &httpServer{ + waitForEndStream: true, + responses: []httpServerResponse{{ + trailers: [][]string{{ + ":status", "200", + "content-type", "application/grpc", + "grpc-status", "14", // UNAVAILABLE + "grpc-message", "unavailable retry", + "grpc-retry-pushback-ms", "10", + }}, + }, { + headers: [][]string{{ + ":status", "200", + "content-type", "application/grpc", + }}, + payload: []byte{0, 0, 0, 0, 0}, // header for 0-byte response message. + trailers: [][]string{{ + "grpc-status", "0", // OK + }}, + }}, + refuseStream: func(i uint32) bool { + return i == 1 + }, + } + server.start(t, lis) + handler := &retryStatsHandler{} + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithStatsHandler(handler), + grpc.WithDefaultServiceConfig((`{ + "methodConfig": [{ + "name": [{"service": "grpc.testing.TestService"}], + "retryPolicy": { + "MaxAttempts": 4, + "InitialBackoff": ".01s", + "MaxBackoff": ".01s", + "BackoffMultiplier": 1.0, + "RetryableStatusCodes": [ "UNAVAILABLE" ] + } + }]}`))) + if err != nil { + t.Fatalf("failed to dial due to err: %v", err) + } + defer cc.Close() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + client := testgrpc.NewTestServiceClient(cc) + + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("unexpected EmptyCall error: %v", err) + } + handler.mu.Lock() + want := 
[]stats.RPCStats{ + &stats.Begin{}, + &stats.OutHeader{FullMethod: "/grpc.testing.TestService/EmptyCall"}, + &stats.OutPayload{WireLength: 5}, + &stats.End{}, + + &stats.Begin{IsTransparentRetryAttempt: true}, + &stats.OutHeader{FullMethod: "/grpc.testing.TestService/EmptyCall"}, + &stats.OutPayload{WireLength: 5}, + &stats.InTrailer{Trailer: metadata.Pairs("content-type", "application/grpc", "grpc-retry-pushback-ms", "10")}, + &stats.End{}, + + &stats.Begin{}, + &stats.OutHeader{FullMethod: "/grpc.testing.TestService/EmptyCall"}, + &stats.OutPayload{WireLength: 5}, + &stats.InHeader{}, + &stats.InPayload{WireLength: 5}, + &stats.InTrailer{}, + &stats.End{}, + } + + toString := func(ss []stats.RPCStats) (ret []string) { + for _, s := range ss { + ret = append(ret, fmt.Sprintf("%T - %v", s, s)) + } + return ret + } + t.Logf("Handler received frames:\n%v\n---\nwant:\n%v\n", + strings.Join(toString(handler.s), "\n"), + strings.Join(toString(want), "\n")) + + if len(handler.s) != len(want) { + t.Fatalf("received unexpected number of RPCStats: got %v; want %v", len(handler.s), len(want)) + } + + // There is a race between receiving the payload (triggered by the + // application / gRPC library) and receiving the trailer (triggered at the + // transport layer). Adjust the received stats accordingly if necessary. 
+ const tIdx, pIdx = 13, 14 + _, okT := handler.s[tIdx].(*stats.InTrailer) + _, okP := handler.s[pIdx].(*stats.InPayload) + if okT && okP { + handler.s[pIdx], handler.s[tIdx] = handler.s[tIdx], handler.s[pIdx] + } + + for i := range handler.s { + w, s := want[i], handler.s[i] + + // Validate the event type + if reflect.TypeOf(w) != reflect.TypeOf(s) { + t.Fatalf("at position %v: got %T; want %T", i, s, w) + } + wv, sv := reflect.ValueOf(w).Elem(), reflect.ValueOf(s).Elem() + + // Validate that Client is always true + if sv.FieldByName("Client").Interface().(bool) != true { + t.Fatalf("at position %v: got Client=false; want true", i) + } + + // Validate any set fields in want + for i := 0; i < wv.NumField(); i++ { + if !wv.Field(i).IsZero() { + if got, want := sv.Field(i).Interface(), wv.Field(i).Interface(); !reflect.DeepEqual(got, want) { + name := reflect.TypeOf(w).Elem().Field(i).Name + t.Fatalf("at position %v, field %v: got %v; want %v", i, name, got, want) + } + } + } + + // Since the above only tests non-zero-value fields, test + // IsTransparentRetryAttempt=false explicitly when needed. + if wb, ok := w.(*stats.Begin); ok && !wb.IsTransparentRetryAttempt { + if s.(*stats.Begin).IsTransparentRetryAttempt { + t.Fatalf("at position %v: got IsTransparentRetryAttempt=true; want false", i) + } + } + } + + // Validate timings between last Begin and preceding End. + end := handler.s[8].(*stats.End) + begin := handler.s[9].(*stats.Begin) + diff := begin.BeginTime.Sub(end.EndTime) + if diff < 10*time.Millisecond || diff > 50*time.Millisecond { + t.Fatalf("pushback time before final attempt = %v; want ~10ms", diff) + } +} + +func (s) TestRetryTransparentWhenCommitted(t *testing.T) { + // With MaxConcurrentStreams=1: + // + // 1. Create stream 1 that is retriable. + // 2. Stream 1 is created and fails with a retriable code. + // 3. Create dummy stream 2, blocking indefinitely. + // 4. Stream 1 retries (and blocks until stream 2 finishes) + // 5. 
Stream 1 is canceled manually. + // + // If there is no bug, the stream is done and errors with CANCELED. With a bug: + // + // 6. Stream 1 has a nil stream (attempt.s). Operations like CloseSend will panic. + + first := grpcsync.NewEvent() + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + // signal? + if !first.HasFired() { + first.Fire() + t.Log("returned first error") + return status.Error(codes.AlreadyExists, "first attempt fails and is retriable") + } + t.Log("blocking") + <-stream.Context().Done() + return stream.Context().Err() + }, + } + + if err := ss.Start([]grpc.ServerOption{grpc.MaxConcurrentStreams(1)}, + grpc.WithDefaultServiceConfig(`{ + "methodConfig": [{ + "name": [{"service": "grpc.testing.TestService"}], + "waitForReady": true, + "retryPolicy": { + "MaxAttempts": 2, + "InitialBackoff": ".1s", + "MaxBackoff": ".1s", + "BackoffMultiplier": 1.0, + "RetryableStatusCodes": [ "ALREADY_EXISTS" ] + } + }]}`)); err != nil { + t.Fatalf("Error starting endpoint server: %v", err) + } + defer ss.Stop() + + ctx1, cancel1 := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel1() + ctx2, cancel2 := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel2() + + stream1, err := ss.Client.FullDuplexCall(ctx1) + if err != nil { + t.Fatalf("Error creating stream 1: %v", err) + } + + // Create dummy stream to block indefinitely. + _, err = ss.Client.FullDuplexCall(ctx2) + if err != nil { + t.Errorf("Error creating stream 2: %v", err) + } + + stream1Closed := grpcsync.NewEvent() + go func() { + _, err := stream1.Recv() + // Will trigger a retry when it sees the ALREADY_EXISTS error + if status.Code(err) != codes.Canceled { + t.Errorf("Expected stream1 to be canceled; got error: %v", err) + } + stream1Closed.Fire() + }() + + // Wait longer than the retry backoff timer. 
+ time.Sleep(200 * time.Millisecond) + cancel1() + + // Operations on the stream should not panic. + <-stream1Closed.Done() + stream1.CloseSend() + stream1.Recv() + stream1.Send(&testpb.StreamingOutputCallRequest{}) +} diff --git a/test/roundrobin_test.go b/test/roundrobin_test.go new file mode 100644 index 000000000000..92fed10ffed0 --- /dev/null +++ b/test/roundrobin_test.go @@ -0,0 +1,319 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/channelz" + imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/internal/stubserver" + rrutil "google.golang.org/grpc/internal/testutils/roundrobin" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +const rrServiceConfig = `{"loadBalancingConfig": [{"round_robin":{}}]}` + +func testRoundRobinBasic(ctx context.Context, t *testing.T, opts ...grpc.DialOption) (*grpc.ClientConn, *manual.Resolver, []*stubserver.StubServer) { + t.Helper() + + // Initialize channelz. 
Used to determine pending RPC count. + czCleanup := channelz.NewChannelzStorageForTesting() + t.Cleanup(func() { czCleanupWrapper(czCleanup, t) }) + + r := manual.NewBuilderWithScheme("whatever") + + const backendCount = 5 + backends := make([]*stubserver.StubServer, backendCount) + addrs := make([]resolver.Address, backendCount) + for i := 0; i < backendCount; i++ { + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started TestService backend at: %q", backend.Address) + t.Cleanup(func() { backend.Stop() }) + + backends[i] = backend + addrs[i] = resolver.Address{Addr: backend.Address} + } + + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(rrServiceConfig), + } + dopts = append(dopts, opts...) + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + t.Cleanup(func() { cc.Close() }) + client := testgrpc.NewTestServiceClient(cc) + + // At this point, the resolver has not returned any addresses to the channel. + // This RPC must block until the context expires. + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() = %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + r.UpdateState(resolver.State{Addresses: addrs}) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil { + t.Fatal(err) + } + return cc, r, backends +} + +// TestRoundRobin_Basic tests the most basic scenario for round_robin. 
It brings +// up a bunch of backends and verifies that RPCs are getting round robin-ed +// across these backends. +func (s) TestRoundRobin_Basic(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + testRoundRobinBasic(ctx, t) +} + +// TestRoundRobin_AddressesRemoved tests the scenario where a bunch of backends +// are brought up, and round_robin is configured as the LB policy and RPCs are +// being correctly round robin-ed across these backends. We then send a resolver +// update with no addresses and verify that the channel enters TransientFailure +// and RPCs fail with an expected error message. +func (s) TestRoundRobin_AddressesRemoved(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, r, _ := testRoundRobinBasic(ctx, t) + + // Send a resolver update with no addresses. This should push the channel into + // TransientFailure. + r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) + awaitState(ctx, t, cc, connectivity.TransientFailure) + + const msgWant = "produced zero addresses" + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); !strings.Contains(status.Convert(err).Message(), msgWant) { + t.Fatalf("EmptyCall() = %v, want Contains(Message(), %q)", err, msgWant) + } +} + +// TestRoundRobin_NewAddressWhileBlocking tests the case where round_robin is +// configured on a channel, things are working as expected and then a resolver +// updates removes all addresses. An RPC attempted at this point in time will be +// blocked because there are no valid backends. This test verifies that when new +// backends are added, the RPC is able to complete. 
+func (s) TestRoundRobin_NewAddressWhileBlocking(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, r, backends := testRoundRobinBasic(ctx, t) + + // Send a resolver update with no addresses. This should push the channel into + // TransientFailure. + r.UpdateState(resolver.State{Addresses: []resolver.Address{}}) + awaitState(ctx, t, cc, connectivity.TransientFailure) + + client := testgrpc.NewTestServiceClient(cc) + doneCh := make(chan struct{}) + go func() { + // The channel is currently in TransientFailure and this RPC will block + // until the channel becomes Ready, which will only happen when we push a + // resolver update with a valid backend address. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Errorf("EmptyCall() = %v, want ", err) + } + close(doneCh) + }() + + // Make sure that there is one pending RPC on the ClientConn before attempting + // to push new addresses through the name resolver. If we don't do this, the + // resolver update can happen before the above goroutine gets to make the RPC. + for { + if err := ctx.Err(); err != nil { + t.Fatal(err) + } + tcs, _ := channelz.GetTopChannels(0, 0) + if len(tcs) != 1 { + t.Fatalf("there should only be one top channel, not %d", len(tcs)) + } + started := tcs[0].ChannelData.CallsStarted + completed := tcs[0].ChannelData.CallsSucceeded + tcs[0].ChannelData.CallsFailed + if (started - completed) == 1 { + break + } + time.Sleep(defaultTestShortTimeout) + } + + // Send a resolver update with a valid backend to push the channel to Ready + // and unblock the above RPC. 
+ r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: backends[0].Address}}}) + + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for blocked RPC to complete") + case <-doneCh: + } +} + +// TestRoundRobin_OneServerDown tests the scenario where a channel is configured +// to round robin across a set of backends, and things are working correctly. +// One backend goes down. The test verifies that going forward, RPCs are round +// robin-ed across the remaining set of backends. +func (s) TestRoundRobin_OneServerDown(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, _, backends := testRoundRobinBasic(ctx, t) + + // Stop one backend. RPCs should round robin across the remaining backends. + backends[len(backends)-1].Stop() + + addrs := make([]resolver.Address, len(backends)-1) + for i := 0; i < len(backends)-1; i++ { + addrs[i] = resolver.Address{Addr: backends[i].Address} + } + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, addrs); err != nil { + t.Fatalf("RPCs are not being round robined across remaining servers: %v", err) + } +} + +// TestRoundRobin_AllServersDown tests the scenario where a channel is +// configured to round robin across a set of backends, and things are working +// correctly. Then, all backends go down. The test verifies that the channel +// moves to TransientFailure and failfast RPCs fail with Unavailable. +func (s) TestRoundRobin_AllServersDown(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc, _, backends := testRoundRobinBasic(ctx, t) + + // Stop all backends. + for _, b := range backends { + b.Stop() + } + + awaitState(ctx, t, cc, connectivity.TransientFailure) + + // Failfast RPCs should fail with Unavailable. 
+	// Fail-fast RPCs should now fail with Unavailable. The pickers inside the
+	// channel are updated asynchronously after the state change, so poll until
+	// an RPC actually observes the Unavailable code instead of checking once
+	// (a single unchecked call could race with the picker update and the test
+	// would silently pass even if RPCs never fail).
+	client := testgrpc.NewTestServiceClient(cc)
+	for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) {
+		if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) == codes.Unavailable {
+			return
+		}
+	}
+	t.Fatal("Timeout waiting for RPCs to fail with Unavailable status")
+}
+
+// TestRoundRobin_UpdateAddressAttributes tests the scenario where the addresses
+// returned by the resolver contain attributes. The test verifies that the
+// attributes contained in the addresses show up as RPC metadata in the backend.
+func (s) TestRoundRobin_UpdateAddressAttributes(t *testing.T) {
+	const (
+		testMDKey   = "test-md"
+		testMDValue = "test-md-value"
+	)
+	r := manual.NewBuilderWithScheme("whatever")
+
+	// Spin up a StubServer to serve as a backend. The implementation verifies
+	// that the expected metadata is received.
+	testMDChan := make(chan []string, 1)
+	backend := &stubserver.StubServer{
+		EmptyCallF: func(ctx context.Context, _ *testpb.Empty) (*testpb.Empty, error) {
+			md, ok := metadata.FromIncomingContext(ctx)
+			if ok {
+				select {
+				case testMDChan <- md[testMDKey]:
+				case <-ctx.Done():
+					return nil, ctx.Err()
+				}
+			}
+			return &testpb.Empty{}, nil
+		},
+	}
+	if err := backend.StartServer(); err != nil {
+		t.Fatalf("Failed to start backend: %v", err)
+	}
+	t.Logf("Started TestService backend at: %q", backend.Address)
+	t.Cleanup(func() { backend.Stop() })
+
+	// Dial the backend with round_robin as the LB policy.
+	dopts := []grpc.DialOption{
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+		grpc.WithResolvers(r),
+		grpc.WithDefaultServiceConfig(rrServiceConfig),
+	}
+	cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...)
+	if err != nil {
+		t.Fatalf("grpc.Dial() failed: %v", err)
+	}
+	t.Cleanup(func() { cc.Close() })
+
+	// Send a resolver update with no address attributes.
+	addr := resolver.Address{Addr: backend.Address}
+	r.UpdateState(resolver.State{Addresses: []resolver.Address{addr}})
+
+	// Make an RPC and ensure it does not contain the metadata we are looking for.
+ client := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = %v, want ", err) + } + select { + case <-ctx.Done(): + t.Fatalf("Timeout when waiting for metadata received in RPC") + case md := <-testMDChan: + if len(md) != 0 { + t.Fatalf("received metadata %v, want nil", md) + } + } + + // Send a resolver update with address attributes. + addrWithAttributes := imetadata.Set(addr, metadata.Pairs(testMDKey, testMDValue)) + r.UpdateState(resolver.State{Addresses: []resolver.Address{addrWithAttributes}}) + + // Make an RPC and ensure it contains the metadata we are looking for. The + // resolver update isn't processed synchronously, so we wait some time before + // failing if some RPCs do not contain it. +Done: + for { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() = %v, want ", err) + } + select { + case <-ctx.Done(): + t.Fatalf("Timeout when waiting for metadata received in RPC") + case md := <-testMDChan: + if len(md) == 1 && md[0] == testMDValue { + break Done + } + } + time.Sleep(defaultTestShortTimeout) + } +} diff --git a/test/server_test.go b/test/server_test.go index 97f352328873..6d525f7954e1 100644 --- a/test/server_test.go +++ b/test/server_test.go @@ -27,11 +27,48 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) type ctxKey string +// TestServerReturningContextError verifies that if a context error is returned +// by the service handler, the status will have the correct status code, not +// Unknown. 
+func (s) TestServerReturningContextError(t *testing.T) {
+	ss := &stubserver.StubServer{
+		EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) {
+			return nil, context.DeadlineExceeded
+		},
+		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
+			return context.DeadlineExceeded
+		},
+	}
+	if err := ss.Start(nil); err != nil {
+		t.Fatalf("Error starting endpoint server: %v", err)
+	}
+	defer ss.Stop()
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	_, err := ss.Client.EmptyCall(ctx, &testpb.Empty{})
+	if s, ok := status.FromError(err); !ok || s.Code() != codes.DeadlineExceeded {
+		t.Fatalf("ss.Client.EmptyCall() got error %v; want ", err)
+	}
+
+	stream, err := ss.Client.FullDuplexCall(ctx)
+	if err != nil {
+		t.Fatalf("unexpected error starting the stream: %v", err)
+	}
+	_, err = stream.Recv()
+	if s, ok := status.FromError(err); !ok || s.Code() != codes.DeadlineExceeded {
+		t.Fatalf("ss.Client.FullDuplexCall().Recv() got error %v; want ", err)
+	}
+
+}
+
 func (s) TestChainUnaryServerInterceptor(t *testing.T) {
 	var (
 		firstIntKey = ctxKey("firstIntKey")
@@ -251,7 +288,7 @@ func (s) TestChainStreamServerInterceptor(t *testing.T) {
 	}
 
 	ss := &stubserver.StubServer{
-		FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error {
+		FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error {
 			if callCounts[0] != 1 {
 				return status.Errorf(codes.Internal, "callCounts[0] should be 1, but got=%d", callCounts[0])
 			}
diff --git a/test/servertester.go b/test/servertester.go
index 9758e8eb6cf8..3701a0e094d9 100644
--- a/test/servertester.go
+++ b/test/servertester.go
@@ -138,19 +138,46 @@ func (st *serverTester) writeSettingsAck() {
 	}
 }
 
+// wantGoAway reads the next frame and fails the test unless it is a GOAWAY
+// frame carrying the given error code.
+func (st *serverTester) wantGoAway(errCode http2.ErrCode) *http2.GoAwayFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a GOAWAY frame: %v", err)
+	}
+	gaf, ok :=
f.(*http2.GoAwayFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *http2.GoAwayFrame", f)
+	}
+	if gaf.ErrCode != errCode {
+		st.t.Fatalf("expected GOAWAY error code '%v', got '%v'", errCode.String(), gaf.ErrCode.String())
+	}
+	return gaf
+}
+
+// wantPing reads the next frame and fails the test unless it is a PING frame.
+func (st *serverTester) wantPing() *http2.PingFrame {
+	f, err := st.readFrame()
+	if err != nil {
+		st.t.Fatalf("Error while expecting a PING frame: %v", err)
+	}
+	pf, ok := f.(*http2.PingFrame)
+	if !ok {
+		st.t.Fatalf("got a %T; want *http2.PingFrame", f)
+	}
+	return pf
+}
+
 func (st *serverTester) wantRSTStream(errCode http2.ErrCode) *http2.RSTStreamFrame {
 	f, err := st.readFrame()
 	if err != nil {
 		st.t.Fatalf("Error while expecting an RST frame: %v", err)
 	}
-	sf, ok := f.(*http2.RSTStreamFrame)
+	rf, ok := f.(*http2.RSTStreamFrame)
 	if !ok {
 		st.t.Fatalf("got a %T; want *http2.RSTStreamFrame", f)
 	}
-	if sf.ErrCode != errCode {
-		st.t.Fatalf("expected RST error code '%v', got '%v'", errCode.String(), sf.ErrCode.String())
+	if rf.ErrCode != errCode {
+		st.t.Fatalf("expected RST error code '%v', got '%v'", errCode.String(), rf.ErrCode.String())
 	}
-	return sf
+	return rf
 }
 
 func (st *serverTester) wantSettings() *http2.SettingsFrame {
@@ -273,3 +300,9 @@ func (st *serverTester) writeRSTStream(streamID uint32, code http2.ErrCode) {
 		st.t.Fatalf("Error writing RST_STREAM: %v", err)
 	}
 }
+
+// writePing writes a PING frame with the given ack flag and opaque data.
+func (st *serverTester) writePing(ack bool, data [8]byte) {
+	if err := st.fr.WritePing(ack, data); err != nil {
+		st.t.Fatalf("Error writing PING: %v", err)
+	}
+}
diff --git a/test/service_config_deprecated_test.go b/test/service_config_deprecated_test.go
new file mode 100644
index 000000000000..ecf43a5760fe
--- /dev/null
+++ b/test/service_config_deprecated_test.go
@@ -0,0 +1,461 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package test + +import ( + "context" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// The following functions with function name ending with TD indicates that they +// should be deleted after old service config API is deprecated and deleted. +func testServiceConfigSetupTD(t *testing.T, e env) (*test, chan grpc.ServiceConfig) { + te := newTest(t, e) + // We write before read. 
+ ch := make(chan grpc.ServiceConfig, 1) + te.sc = ch + te.userAgent = testAppUA + te.declareLogNoise( + "transport: http2Client.notifyError got notified that the client transport was broken EOF", + "grpc: addrConn.transportMonitor exits due to: grpc: the connection is closing", + "grpc: addrConn.resetTransport failed to create client transport: connection error", + "Failed to dial : context canceled; please retry.", + ) + return te, ch +} + +func (s) TestServiceConfigGetMethodConfigTD(t *testing.T) { + for _, e := range listTestEnv() { + testGetMethodConfigTD(t, e) + } +} + +func testGetMethodConfigTD(t *testing.T, e env) { + te, ch := testServiceConfigSetupTD(t, e) + defer te.tearDown() + + mc1 := grpc.MethodConfig{ + WaitForReady: newBool(true), + Timeout: newDuration(time.Millisecond), + } + mc2 := grpc.MethodConfig{WaitForReady: newBool(false)} + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc1 + m["/grpc.testing.TestService/"] = mc2 + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/UnaryCall"] = mc1 + m["/grpc.testing.TestService/"] = mc2 + sc = grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + // Wait for the new service config to propagate. + for { + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + break + } + } + // The following RPCs are expected to become fail-fast. 
+ if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.Unavailable) + } +} + +func (s) TestServiceConfigWaitForReadyTD(t *testing.T) { + for _, e := range listTestEnv() { + testServiceConfigWaitForReadyTD(t, e) + } +} + +func testServiceConfigWaitForReadyTD(t *testing.T, e env) { + te, ch := testServiceConfigSetupTD(t, e) + defer te.tearDown() + + // Case1: Client API set failfast to be false, and service config set wait_for_ready to be false, Client API should win, and the rpc will wait until deadline exceeds. + mc := grpc.MethodConfig{ + WaitForReady: newBool(false), + Timeout: newDuration(time.Millisecond), + } + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + + // Generate a service config update. + // Case2: Client API does not set failfast, and service config set wait_for_ready to be true, and the rpc will wait until deadline exceeds. 
+ mc.WaitForReady = newBool(true) + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc = grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + // Wait for the new service config to take effect. + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") + if *mc.WaitForReady { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for service config to take effect") + } + + // The following RPCs are expected to become non-fail-fast ones with 1ms deadline. + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + if _, err := tc.FullDuplexCall(ctx); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } +} + +func (s) TestServiceConfigTimeoutTD(t *testing.T) { + for _, e := range listTestEnv() { + testServiceConfigTimeoutTD(t, e) + } +} + +func testServiceConfigTimeoutTD(t *testing.T, e env) { + te, ch := testServiceConfigSetupTD(t, e) + defer te.tearDown() + + // Case1: Client API sets timeout to be 1ns and ServiceConfig sets timeout to be 1hr. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. + mc := grpc.MethodConfig{ + Timeout: newDuration(time.Hour), + } + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + cc := te.clientConn() + tc := testgrpc.NewTestServiceClient(cc) + // The following RPCs are expected to become non-fail-fast ones with 1ns deadline. 
+ ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + cancel() + ctx, cancel = context.WithTimeout(context.Background(), time.Nanosecond) + if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + cancel() + + // Generate a service config update. + // Case2: Client API sets timeout to be 1hr and ServiceConfig sets timeout to be 1ns. Timeout should be 1ns (min of 1ns and 1hr) and the rpc will wait until deadline exceeds. + mc.Timeout = newDuration(time.Nanosecond) + m = make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/EmptyCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc = grpc.ServiceConfig{ + Methods: m, + } + ch <- sc + + // Wait for the new service config to take effect. 
+ ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + mc = cc.GetMethodConfig("/grpc.testing.TestService/FullDuplexCall") + if *mc.Timeout == time.Nanosecond { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for service config to take effect") + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Hour) + if _, err := tc.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/EmptyCall(_, _) = _, %v, want _, %s", err, codes.DeadlineExceeded) + } + cancel() + + ctx, cancel = context.WithTimeout(context.Background(), time.Hour) + if _, err := tc.FullDuplexCall(ctx, grpc.WaitForReady(true)); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("TestService/FullDuplexCall(_) = _, %v, want %s", err, codes.DeadlineExceeded) + } + cancel() +} + +func (s) TestServiceConfigMaxMsgSizeTD(t *testing.T) { + for _, e := range listTestEnv() { + testServiceConfigMaxMsgSizeTD(t, e) + } +} + +func testServiceConfigMaxMsgSizeTD(t *testing.T, e env) { + // Setting up values and objects shared across all test cases. 
+ const smallSize = 1 + const largeSize = 1024 + const extraLargeSize = 2048 + + smallPayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, smallSize) + if err != nil { + t.Fatal(err) + } + largePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, largeSize) + if err != nil { + t.Fatal(err) + } + extraLargePayload, err := newPayload(testpb.PayloadType_COMPRESSABLE, extraLargeSize) + if err != nil { + t.Fatal(err) + } + + mc := grpc.MethodConfig{ + MaxReqSize: newInt(extraLargeSize), + MaxRespSize: newInt(extraLargeSize), + } + + m := make(map[string]grpc.MethodConfig) + m["/grpc.testing.TestService/UnaryCall"] = mc + m["/grpc.testing.TestService/FullDuplexCall"] = mc + sc := grpc.ServiceConfig{ + Methods: m, + } + // Case1: sc set maxReqSize to 2048 (send), maxRespSize to 2048 (recv). + te1, ch1 := testServiceConfigSetupTD(t, e) + te1.startServer(&testServer{security: e.security}) + defer te1.tearDown() + + ch1 <- sc + tc := testgrpc.NewTestServiceClient(te1.clientConn()) + + req := &testpb.SimpleRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseSize: int32(extraLargeSize), + Payload: smallPayload, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Test for unary RPC recv. + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = extraLargePayload + req.ResponseSize = int32(smallSize) + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. 
+ respParam := []*testpb.ResponseParameters{ + { + Size: int32(extraLargeSize), + }, + } + sreq := &testpb.StreamingOutputCallRequest{ + ResponseType: testpb.PayloadType_COMPRESSABLE, + ResponseParameters: respParam, + Payload: smallPayload, + } + stream, err := tc.FullDuplexCall(te1.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. + respParam[0].Size = int32(smallSize) + sreq.Payload = extraLargePayload + stream, err = tc.FullDuplexCall(te1.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } + + // Case2: Client API set maxReqSize to 1024 (send), maxRespSize to 1024 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). + te2, ch2 := testServiceConfigSetupTD(t, e) + te2.maxClientReceiveMsgSize = newInt(1024) + te2.maxClientSendMsgSize = newInt(1024) + te2.startServer(&testServer{security: e.security}) + defer te2.tearDown() + ch2 <- sc + tc = testgrpc.NewTestServiceClient(te2.clientConn()) + + // Test for unary RPC recv. + req.Payload = smallPayload + req.ResponseSize = int32(largeSize) + + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. 
+ req.Payload = largePayload + req.ResponseSize = int32(smallSize) + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. + stream, err = tc.FullDuplexCall(te2.ctx) + respParam[0].Size = int32(largeSize) + sreq.Payload = smallPayload + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. + respParam[0].Size = int32(smallSize) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te2.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } + + // Case3: Client API set maxReqSize to 4096 (send), maxRespSize to 4096 (recv). Sc sets maxReqSize to 2048 (send), maxRespSize to 2048 (recv). + te3, ch3 := testServiceConfigSetupTD(t, e) + te3.maxClientReceiveMsgSize = newInt(4096) + te3.maxClientSendMsgSize = newInt(4096) + te3.startServer(&testServer{security: e.security}) + defer te3.tearDown() + ch3 <- sc + tc = testgrpc.NewTestServiceClient(te3.clientConn()) + + // Test for unary RPC recv. 
+ req.Payload = smallPayload + req.ResponseSize = int32(largeSize) + + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) + } + + req.ResponseSize = int32(extraLargeSize) + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for unary RPC send. + req.Payload = largePayload + req.ResponseSize = int32(smallSize) + if _, err := tc.UnaryCall(ctx, req); err != nil { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want ", err) + } + + req.Payload = extraLargePayload + if _, err := tc.UnaryCall(ctx, req); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("TestService/UnaryCall(_, _) = _, %v, want _, error code: %s", err, codes.ResourceExhausted) + } + + // Test for streaming RPC recv. + stream, err = tc.FullDuplexCall(te3.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + respParam[0].Size = int32(largeSize) + sreq.Payload = smallPayload + + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("%v.Recv() = _, %v, want ", stream, err) + } + + respParam[0].Size = int32(extraLargeSize) + + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + if _, err := stream.Recv(); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Recv() = _, %v, want _, error code: %s", stream, err, codes.ResourceExhausted) + } + + // Test for streaming RPC send. 
+ respParam[0].Size = int32(smallSize) + sreq.Payload = largePayload + stream, err = tc.FullDuplexCall(te3.ctx) + if err != nil { + t.Fatalf("%v.FullDuplexCall(_) = _, %v, want ", tc, err) + } + if err := stream.Send(sreq); err != nil { + t.Fatalf("%v.Send(%v) = %v, want ", stream, sreq, err) + } + sreq.Payload = extraLargePayload + if err := stream.Send(sreq); err == nil || status.Code(err) != codes.ResourceExhausted { + t.Fatalf("%v.Send(%v) = %v, want _, error code: %s", stream, sreq, err, codes.ResourceExhausted) + } +} diff --git a/test/stream_cleanup_test.go b/test/stream_cleanup_test.go index 83dd68549e99..f1d7c536aa01 100644 --- a/test/stream_cleanup_test.go +++ b/test/stream_cleanup_test.go @@ -28,7 +28,9 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/stubserver" "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" ) func (s) TestStreamCleanup(t *testing.T) { @@ -46,7 +48,7 @@ func (s) TestStreamCleanup(t *testing.T) { return &testpb.Empty{}, nil }, } - if err := ss.Start([]grpc.ServerOption{grpc.MaxConcurrentStreams(1)}, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(callRecvMsgSize))), grpc.WithInitialWindowSize(int32(initialWindowSize))); err != nil { + if err := ss.Start(nil, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(callRecvMsgSize))), grpc.WithInitialWindowSize(int32(initialWindowSize))); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() @@ -68,7 +70,7 @@ func (s) TestStreamCleanupAfterSendStatus(t *testing.T) { serverReturnedStatus := make(chan struct{}) ss := &stubserver.StubServer{ - FullDuplexCallF: func(stream testpb.TestService_FullDuplexCallServer) error { + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { defer func() { close(serverReturnedStatus) }() @@ -79,7 +81,7 @@ func (s) 
TestStreamCleanupAfterSendStatus(t *testing.T) { }) }, } - if err := ss.Start([]grpc.ServerOption{grpc.MaxConcurrentStreams(1)}, grpc.WithInitialWindowSize(int32(initialWindowSize))); err != nil { + if err := ss.Start(nil, grpc.WithInitialWindowSize(int32(initialWindowSize))); err != nil { t.Fatalf("Error starting endpoint server: %v", err) } defer ss.Stop() @@ -132,6 +134,6 @@ func (s) TestStreamCleanupAfterSendStatus(t *testing.T) { case <-gracefulStopDone: timer.Stop() case <-timer.C: - t.Fatalf("s.GracefulStop() didn't finish without 1 second after the last RPC") + t.Fatalf("s.GracefulStop() didn't finish within 1 second after the last RPC") } } diff --git a/test/subconn_test.go b/test/subconn_test.go new file mode 100644 index 000000000000..524acf9f70e3 --- /dev/null +++ b/test/subconn_test.go @@ -0,0 +1,125 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package test + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/stubserver" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/resolver" +) + +type tsccPicker struct { + sc balancer.SubConn +} + +func (p *tsccPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{SubConn: p.sc}, nil +} + +// TestSubConnEmpty tests that removing all addresses from a SubConn and then +// re-adding them does not cause a panic and properly reconnects. +func (s) TestSubConnEmpty(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // sc is the one SubConn used throughout the test. Created on demand and + // re-used on every update. + var sc balancer.SubConn + + // Simple custom balancer that sets the address list to empty if the + // resolver produces no addresses. Pickfirst, by default, will remove the + // SubConn in this case instead. 
+ bal := stub.BalancerFuncs{ + UpdateClientConnState: func(d *stub.BalancerData, ccs balancer.ClientConnState) error { + if sc == nil { + var err error + sc, err = d.ClientConn.NewSubConn(ccs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error creating initial subconn: %v", err) + } + } else { + d.ClientConn.UpdateAddresses(sc, ccs.ResolverState.Addresses) + } + sc.Connect() + + if len(ccs.ResolverState.Addresses) == 0 { + d.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(errors.New("no addresses")), + }) + } else { + d.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &tsccPicker{sc: sc}, + }) + } + return nil + }, + UpdateSubConnState: func(d *stub.BalancerData, sc balancer.SubConn, scs balancer.SubConnState) { + switch scs.ConnectivityState { + case connectivity.Ready: + d.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &tsccPicker{sc: sc}, + }) + case connectivity.TransientFailure: + d.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(fmt.Errorf("error connecting: %v", scs.ConnectionError)), + }) + } + }, + } + stub.Register("tscc", bal) + + // Start the stub server with our stub balancer. 
+ ss := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := ss.Start(nil, grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"tscc":{}}]}`)); err != nil { + t.Fatalf("Error starting server: %v", err) + } + defer ss.Stop() + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall failed: %v", err) + } + + t.Log("Removing addresses from resolver and SubConn") + ss.R.UpdateState(resolver.State{Addresses: []resolver.Address{}}) + awaitState(ctx, t, ss.CC, connectivity.TransientFailure) + + t.Log("Re-adding addresses to resolver and SubConn") + ss.R.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: ss.Address}}}) + if _, err := ss.Client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall failed: %v", err) + } +} diff --git a/security/advancedtls/sni_appengine.go b/test/timeouts.go similarity index 69% rename from security/advancedtls/sni_appengine.go rename to test/timeouts.go index fffbb0107ddd..1c0c2123938a 100644 --- a/security/advancedtls/sni_appengine.go +++ b/test/timeouts.go @@ -1,5 +1,3 @@ -// +build appengine - /* * * Copyright 2020 gRPC authors. @@ -18,13 +16,14 @@ * */ -package advancedtls +package test -import ( - "crypto/tls" -) +import "time" -// buildGetCertificates is a no-op for appengine builds. -func buildGetCertificates(clientHello *tls.ClientHelloInfo, o *ServerOptions) (*tls.Certificate, error) { - return nil, nil -} +const ( + // Default timeout for tests in this package. + defaultTestTimeout = 10 * time.Second + // Default short timeout, to be used when waiting for events which are not + // expected to happen. 
+ defaultTestShortTimeout = 100 * time.Millisecond +) diff --git a/test/tools/go.mod b/test/tools/go.mod index 874268d34fce..4e91307e87c5 100644 --- a/test/tools/go.mod +++ b/test/tools/go.mod @@ -1,12 +1,13 @@ module google.golang.org/grpc/test/tools -go 1.11 +go 1.14 require ( github.com/client9/misspell v0.3.4 - github.com/golang/protobuf v1.4.1 - golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 - golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 - google.golang.org/protobuf v1.25.0 // indirect - honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc + github.com/golang/protobuf v1.5.3 + golang.org/x/exp/typeparams v0.0.0-20230418202329-0354be287a23 // indirect + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 + golang.org/x/tools v0.8.0 + google.golang.org/protobuf v1.30.0 // indirect + honnef.co/go/tools v0.4.3 ) diff --git a/test/tools/go.sum b/test/tools/go.sum index 09acda10d25c..af54bb815338 100644 --- a/test/tools/go.sum +++ b/test/tools/go.sum @@ -1,71 +1,74 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= -github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230418202329-0354be287a23 h1:EQdGOd2o46bhZIT8VRldX3rWAaqCsxQWznzxhLZLiM8= +golang.org/x/exp/typeparams v0.0.0-20230418202329-0354be287a23/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net 
v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod 
h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.4.1-0.20221208213631-3f74d914ae6d/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= +honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= diff --git a/test/tools/tools.go b/test/tools/tools.go index 511dc2534462..646a144ccca1 100644 --- a/test/tools/tools.go +++ b/test/tools/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools /* @@ -18,10 +19,9 @@ * */ -// This package exists to cause `go mod` and `go get` to believe these tools -// are dependencies, even though they are not runtime dependencies of any grpc -// package. This means they will appear in our `go.mod` file, but will not be -// a part of the build. +// This file is not intended to be compiled. Because some of these imports are +// not actual go packages, we use a build constraint at the top of this file to +// prevent tools from inspecting the imports. package tools diff --git a/test/tools/tools_vet.go b/test/tools/tools_vet.go new file mode 100644 index 000000000000..06ab2fd10be2 --- /dev/null +++ b/test/tools/tools_vet.go @@ -0,0 +1,21 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tools is used to pin specific versions of external tools in this +// module's go.mod that gRPC uses for internal testing. +package tools diff --git a/test/transport_test.go b/test/transport_test.go new file mode 100644 index 000000000000..d58bdf8acd77 --- /dev/null +++ b/test/transport_test.go @@ -0,0 +1,155 @@ +/* +* +* Copyright 2023 gRPC authors. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+* + */ +package test + +import ( + "context" + "io" + "net" + "sync" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/status" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// connWrapperWithCloseCh wraps a net.Conn and fires an event when closed. +type connWrapperWithCloseCh struct { + net.Conn + close *grpcsync.Event +} + +// Close closes the connection and sends a value on the close channel. +func (cw *connWrapperWithCloseCh) Close() error { + cw.close.Fire() + return cw.Conn.Close() +} + +// These custom creds are used for storing the connections made by the client. +// The closeCh in conn can be used to detect when conn is closed. +type transportRestartCheckCreds struct { + mu sync.Mutex + connections []*connWrapperWithCloseCh +} + +func (c *transportRestartCheckCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return rawConn, nil, nil +} +func (c *transportRestartCheckCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + c.mu.Lock() + defer c.mu.Unlock() + conn := &connWrapperWithCloseCh{Conn: rawConn, close: grpcsync.NewEvent()} + c.connections = append(c.connections, conn) + return conn, nil, nil +} +func (c *transportRestartCheckCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{} +} +func (c *transportRestartCheckCreds) Clone() credentials.TransportCredentials { + return c +} +func (c *transportRestartCheckCreds) OverrideServerName(s string) error { + return nil +} + +// Tests that the client transport drains and restarts when next stream ID exceeds +// MaxStreamID. 
This test also verifies that subsequent RPCs use a new client +// transport and the old transport is closed. +func (s) TestClientTransportRestartsAfterStreamIDExhausted(t *testing.T) { + // Set the transport's MaxStreamID to 4 to cause connection to drain after 2 RPCs. + originalMaxStreamID := transport.MaxStreamID + transport.MaxStreamID = 4 + defer func() { + transport.MaxStreamID = originalMaxStreamID + }() + + ss := &stubserver.StubServer{ + FullDuplexCallF: func(stream testgrpc.TestService_FullDuplexCallServer) error { + if _, err := stream.Recv(); err != nil { + return status.Errorf(codes.Internal, "unexpected error receiving: %v", err) + } + if err := stream.Send(&testpb.StreamingOutputCallResponse{}); err != nil { + return status.Errorf(codes.Internal, "unexpected error sending: %v", err) + } + if recv, err := stream.Recv(); err != io.EOF { + return status.Errorf(codes.Internal, "Recv = %v, %v; want _, io.EOF", recv, err) + } + return nil + }, + } + + creds := &transportRestartCheckCreds{} + if err := ss.Start(nil, grpc.WithTransportCredentials(creds)); err != nil { + t.Fatalf("Starting stubServer: %v", err) + } + defer ss.Stop() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + var streams []testgrpc.TestService_FullDuplexCallClient + + const numStreams = 3 + // expected number of conns when each stream is created i.e., 3rd stream is created + // on a new connection. + expectedNumConns := [numStreams]int{1, 1, 2} + + // Set up 3 streams. + for i := 0; i < numStreams; i++ { + s, err := ss.Client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("Creating FullDuplex stream: %v", err) + } + streams = append(streams, s) + // Verify expected num of conns after each stream is created. + if len(creds.connections) != expectedNumConns[i] { + t.Fatalf("Got number of connections created: %v, want: %v", len(creds.connections), expectedNumConns[i]) + } + } + + // Verify all streams still work. 
+ for i, stream := range streams { + if err := stream.Send(&testpb.StreamingOutputCallRequest{}); err != nil { + t.Fatalf("Sending on stream %d: %v", i, err) + } + if _, err := stream.Recv(); err != nil { + t.Fatalf("Receiving on stream %d: %v", i, err) + } + } + + for i, stream := range streams { + if err := stream.CloseSend(); err != nil { + t.Fatalf("CloseSend() on stream %d: %v", i, err) + } + } + + // Verifying first connection was closed. + select { + case <-creds.connections[0].close.Done(): + case <-ctx.Done(): + t.Fatal("Timeout expired when waiting for first client transport to close") + } +} diff --git a/test/xds/xds_client_ack_nack_test.go b/test/xds/xds_client_ack_nack_test.go new file mode 100644 index 000000000000..87ff0077cd70 --- /dev/null +++ b/test/xds/xds_client_ack_nack_test.go @@ -0,0 +1,190 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
// We are interested in LDS, RDS, CDS and EDS resources as part of the regular
// xDS flow on the client.
const wantResources = 4

// seenAllACKs reports whether ackVersions contains valid acks for all the
// resources that we are interested in. If wantNonEmpty is true, only
// non-empty ack versions are considered valid.
func seenAllACKs(ackVersions map[string]string, wantNonEmpty bool) bool {
	if len(ackVersions) != wantResources {
		return false
	}
	if !wantNonEmpty {
		return true
	}
	for _, version := range ackVersions {
		if version == "" {
			return false
		}
	}
	return true
}
// TestClientResourceVersionAfterStreamRestart tests the scenario where the
// xdsClient's ADS stream to the management server gets broken. This test
// verifies that the version number on the initial request on the new stream
// indicates the most recent version seen by the client on the previous stream.
func (s) TestClientResourceVersionAfterStreamRestart(t *testing.T) {
	// Create a restartable listener which can close existing connections.
	l, err := testutils.LocalTCPListener()
	if err != nil {
		t.Fatalf("testutils.LocalTCPListener() failed: %v", err)
	}
	lis := testutils.NewRestartableListener(l)

	// We depend on the fact that the management server assigns monotonically
	// increasing stream IDs starting at 1.
	const (
		idBeforeRestart = 1
		idAfterRestart  = 2
	)

	// Events of importance in the test, in the order in which they are
	// expected to happen.
	acksReceivedBeforeRestart := grpcsync.NewEvent()
	streamRestarted := grpcsync.NewEvent()
	acksReceivedAfterRestart := grpcsync.NewEvent()

	// Map from stream id to a map of resource type to resource version.
	ackVersionsMap := make(map[int64]map[string]string)
	managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{
		Listener: lis,
		// Invoked by the management server for every request it receives.
		OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error {
			// Return early under the following circumstances:
			// - Received all the requests we wanted to see. This is to avoid
			//   any stray requests leading to test flakes.
			// - Request contains no resource names. Such requests are usually
			//   seen when the xdsclient is shutting down and is no longer
			//   interested in the resources that it had subscribed to earlier.
			if acksReceivedAfterRestart.HasFired() || len(req.GetResourceNames()) == 0 {
				return nil
			}
			// Create a stream specific map to store ack versions if this is
			// the first time we are seeing this stream id.
			if ackVersionsMap[id] == nil {
				ackVersionsMap[id] = make(map[string]string)
			}
			ackVersionsMap[id][req.GetTypeUrl()] = req.GetVersionInfo()
			// Prior to stream restart, we are interested only in non-empty
			// resource versions. The xdsclient first sends out requests with
			// an empty version string. After receipt of requested resource, it
			// sends out another request for the same resource, but this time
			// with a non-empty version string, to serve as an ACK.
			if seenAllACKs(ackVersionsMap[idBeforeRestart], true) {
				acksReceivedBeforeRestart.Fire()
			}
			// After stream restart, we expect the xdsclient to send out
			// requests with version string set to the previously ACKed
			// versions. If it sends out requests with empty version string, it
			// is a bug and we want this test to catch it.
			if seenAllACKs(ackVersionsMap[idAfterRestart], false) {
				acksReceivedAfterRestart.Fire()
			}
			return nil
		},
		OnStreamClosed: func(int64, *v3corepb.Node) {
			streamRestarted.Fire()
		},
	})
	defer cleanup1()

	server := stubserver.StartTestService(t, nil)
	defer server.Stop()

	const serviceName = "my-service-client-side-xds"
	resources := e2e.DefaultClientResources(e2e.ResourceParams{
		DialTarget: serviceName,
		NodeID:     nodeID,
		Host:       "localhost",
		Port:       testutils.ParsePort(t, server.Address),
		SecLevel:   e2e.SecurityLevelNone,
	})
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := managementServer.Update(ctx, resources); err != nil {
		t.Fatal(err)
	}

	// Create a ClientConn and make a successful RPC.
	cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver))
	if err != nil {
		t.Fatalf("failed to dial local test server: %v", err)
	}
	defer cc.Close()

	client := testgrpc.NewTestServiceClient(cc)
	if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("rpc EmptyCall() failed: %v", err)
	}

	// A successful RPC means that the xdsclient received all requested
	// resources. The ACKs from the xdsclient may get a little delayed. So, we
	// need to wait for all ACKs to be received on the management server before
	// restarting the stream.
	select {
	case <-ctx.Done():
		t.Fatal("Timeout when waiting for all resources to be ACKed prior to stream restart")
	case <-acksReceivedBeforeRestart.Done():
	}

	// Stop the listener on the management server. This will cause the client
	// to backoff and recreate the stream.
	lis.Stop()

	// Wait for the stream to be closed on the server.
	<-streamRestarted.Done()

	// Restart the listener on the management server to be able to accept
	// reconnect attempts from the client.
	lis.Restart()

	// Wait for all the previously sent resources to be re-requested.
	select {
	case <-ctx.Done():
		t.Fatal("Timeout when waiting for all resources to be ACKed post stream restart")
	case <-acksReceivedAfterRestart.Done():
	}

	// NOTE(review): ackVersionsMap is written from the management server's
	// request-handling goroutine and read here after acksReceivedAfterRestart
	// fires; OnStreamRequest returns early once the event has fired, but an
	// in-flight write could presumably race with this read — confirm with
	// `go test -race`.
	if diff := cmp.Diff(ackVersionsMap[idBeforeRestart], ackVersionsMap[idAfterRestart]); diff != "" {
		t.Fatalf("unexpected diff in ack versions before and after stream restart (-want, +got):\n%s", diff)
	}
	if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("rpc EmptyCall() failed: %v", err)
	}
}
+ * + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// hashRouteConfig returns a RouteConfig resource with hash policy set to +// header "session_id". +func hashRouteConfig(routeName, ldsTarget, clusterName string) *v3routepb.RouteConfiguration { + return &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + HashPolicy: []*v3routepb.RouteAction_HashPolicy{{ + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ + Header: &v3routepb.RouteAction_HashPolicy_Header{ + HeaderName: "session_id", + }, + }, + Terminal: true, + }}, + }}, + }}, + }}, + } +} + +// ringhashCluster returns a Cluster resource that picks ringhash as the lb +// policy. 
+func ringhashCluster(clusterName, edsServiceName string) *v3clusterpb.Cluster { + return &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: edsServiceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + } +} + +// TestClientSideAffinitySanityCheck tests that the affinity config can be +// propagated to pick the ring_hash policy. It doesn't test the affinity +// behavior in ring_hash policy. +func (s) TestClientSideAffinitySanityCheck(t *testing.T) { + defer func() func() { + old := envconfig.XDSRingHash + envconfig.XDSRingHash = true + return func() { envconfig.XDSRingHash = old } + }()() + + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: testutils.ParsePort(t, server.Address), + SecLevel: e2e.SecurityLevelNone, + }) + // Replace RDS and CDS resources with ringhash config, but keep the resource + // names. 
+ resources.Routes = []*v3routepb.RouteConfiguration{hashRouteConfig( + resources.Routes[0].Name, + resources.Listeners[0].Name, + resources.Clusters[0].Name, + )} + resources.Clusters = []*v3clusterpb.Cluster{ringhashCluster( + resources.Clusters[0].Name, + resources.Clusters[0].EdsClusterConfig.ServiceName, + )} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} diff --git a/test/xds/xds_client_custom_lb_test.go b/test/xds/xds_client_custom_lb_test.go new file mode 100644 index 000000000000..87bd437785a6 --- /dev/null +++ b/test/xds/xds_client_custom_lb_test.go @@ -0,0 +1,251 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + "time" + + "google.golang.org/grpc" + _ "google.golang.org/grpc/balancer/weightedroundrobin" // To register weighted_round_robin + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/roundrobin" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/resolver" + + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3clientsideweightedroundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3" + v3roundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/round_robin/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + "github.com/golang/protobuf/proto" + structpb "github.com/golang/protobuf/ptypes/struct" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +// wrrLocality is a helper that takes a proto message and returns a +// WrrLocalityProto with the proto message marshaled into a proto.Any as a +// child. 
+func wrrLocality(m proto.Message) *v3wrrlocalitypb.WrrLocality { + return &v3wrrlocalitypb.WrrLocality{ + EndpointPickingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(m), + }, + }, + }, + }, + } +} + +// clusterWithLBConfiguration returns a cluster resource with the proto message +// passed Marshaled to an any and specified through the load_balancing_policy +// field. +func clusterWithLBConfiguration(clusterName, edsServiceName string, secLevel e2e.SecurityLevel, m proto.Message) *v3clusterpb.Cluster { + cluster := e2e.DefaultCluster(clusterName, edsServiceName, secLevel) + cluster.LoadBalancingPolicy = &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(m), + }, + }, + }, + } + return cluster +} + +// TestWRRLocality tests RPC distribution across a scenario with 5 backends, +// with 2 backends in a locality with weight 1, and 3 backends in a second +// locality with weight 2. Through xDS, the test configures a +// wrr_locality_balancer with either a round robin or custom (specifying pick +// first) child load balancing policy, and asserts the correct distribution +// based on the locality weights and the endpoint picking policy specified. 
// TestWrrLocality tests RPC distribution across a scenario with 5 backends,
// with 2 backends in a locality with weight 1, and 3 backends in a second
// locality with weight 2. Through xDS, the test configures a
// wrr_locality_balancer with either a round robin or custom (specifying pick
// first) child load balancing policy, and asserts the correct distribution
// based on the locality weights and the endpoint picking policy specified.
func (s) TestWrrLocality(t *testing.T) {
	// Enable custom LB policy support for the duration of the test.
	oldCustomLBSupport := envconfig.XDSCustomLBPolicy
	envconfig.XDSCustomLBPolicy = true
	defer func() {
		envconfig.XDSCustomLBPolicy = oldCustomLBSupport
	}()

	// Five real backends: 1 and 2 go into the weight-1 locality, 3-5 into the
	// weight-2 locality (see the Endpoints resource built below).
	backend1 := stubserver.StartTestService(t, nil)
	port1 := testutils.ParsePort(t, backend1.Address)
	defer backend1.Stop()
	backend2 := stubserver.StartTestService(t, nil)
	port2 := testutils.ParsePort(t, backend2.Address)
	defer backend2.Stop()
	backend3 := stubserver.StartTestService(t, nil)
	port3 := testutils.ParsePort(t, backend3.Address)
	defer backend3.Stop()
	backend4 := stubserver.StartTestService(t, nil)
	port4 := testutils.ParsePort(t, backend4.Address)
	defer backend4.Stop()
	backend5 := stubserver.StartTestService(t, nil)
	port5 := testutils.ParsePort(t, backend5.Address)
	defer backend5.Stop()
	const serviceName = "my-service-client-side-xds"

	tests := []struct {
		name string
		// Configuration will be specified through load_balancing_policy field.
		wrrLocalityConfiguration *v3wrrlocalitypb.WrrLocality
		// Expected per-address RPC counts, in ratio form (the checker scales
		// them against the observed distribution).
		addressDistributionWant []struct {
			addr  string
			count int
		}
	}{
		{
			name:                     "rr_child",
			wrrLocalityConfiguration: wrrLocality(&v3roundrobinpb.RoundRobin{}),
			// Each address's expected probability is the weight of its
			// locality divided by the total locality weights, multiplied by
			// 1 / number of endpoints in that locality (due to round robin
			// across endpoints in a locality). Thus, address 1 and address 2
			// have 1/3 * 1/2 probability, and addresses 3 4 5 have 2/3 * 1/3
			// probability of being routed to.
			addressDistributionWant: []struct {
				addr  string
				count int
			}{
				{addr: backend1.Address, count: 6},
				{addr: backend2.Address, count: 6},
				{addr: backend3.Address, count: 8},
				{addr: backend4.Address, count: 8},
				{addr: backend5.Address, count: 8},
			},
		},
		// This configures custom lb as the child of wrr_locality, which points
		// to our pick_first implementation. Thus, the expected distribution of
		// addresses is locality weight of locality / total locality weights as
		// the probability of picking the first backend within the locality
		// (e.g. Address 1 for locality 1, and Address 3 for locality 2).
		{
			name: "custom_lb_child_pick_first",
			wrrLocalityConfiguration: wrrLocality(&v3xdsxdstypepb.TypedStruct{
				TypeUrl: "type.googleapis.com/pick_first",
				Value:   &structpb.Struct{},
			}),
			addressDistributionWant: []struct {
				addr  string
				count int
			}{
				{addr: backend1.Address, count: 1},
				{addr: backend3.Address, count: 2},
			},
		},
		// Sanity check for weighted round robin. Don't need to test super
		// specific behaviors, as that is covered in unit tests. Set up
		// weighted round robin as the endpoint picking policy with per RPC
		// load reports enabled. Due to the server not sending trailers with
		// load reports, the weighted round robin policy should essentially
		// function as round robin, and thus should have the same distribution
		// as round robin above.
		{
			name: "custom_lb_child_wrr/",
			wrrLocalityConfiguration: wrrLocality(&v3clientsideweightedroundrobinpb.ClientSideWeightedRoundRobin{
				EnableOobLoadReport: &wrapperspb.BoolValue{
					Value: false,
				},
				// BlackoutPeriod long enough to cause load report weights to
				// trigger in the scope of test case, but no load reports
				// configured anyway.
				BlackoutPeriod:          durationpb.New(10 * time.Second),
				WeightExpirationPeriod:  durationpb.New(10 * time.Second),
				WeightUpdatePeriod:      durationpb.New(time.Second),
				ErrorUtilizationPenalty: &wrapperspb.FloatValue{Value: 1},
			}),
			addressDistributionWant: []struct {
				addr  string
				count int
			}{
				{addr: backend1.Address, count: 6},
				{addr: backend2.Address, count: 6},
				{addr: backend3.Address, count: 8},
				{addr: backend4.Address, count: 8},
				{addr: backend5.Address, count: 8},
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			managementServer, nodeID, _, r, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
			defer cleanup()

			routeConfigName := "route-" + serviceName
			clusterName := "cluster-" + serviceName
			endpointsName := "endpoints-" + serviceName
			resources := e2e.UpdateOptions{
				NodeID:    nodeID,
				Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, routeConfigName)},
				Routes:    []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(routeConfigName, serviceName, clusterName)},
				Clusters:  []*v3clusterpb.Cluster{clusterWithLBConfiguration(clusterName, endpointsName, e2e.SecurityLevelNone, test.wrrLocalityConfiguration)},
				Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.EndpointResourceWithOptions(e2e.EndpointOptions{
					ClusterName: endpointsName,
					Host:        "localhost",
					Localities: []e2e.LocalityOptions{
						{
							Backends: []e2e.BackendOptions{{Port: port1}, {Port: port2}},
							Weight:   1,
						},
						{
							Backends: []e2e.BackendOptions{{Port: port3}, {Port: port4}, {Port: port5}},
							Weight:   2,
						},
					},
				})},
			}

			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
			defer cancel()
			if err := managementServer.Update(ctx, resources); err != nil {
				t.Fatal(err)
			}

			cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))
			if err != nil {
				t.Fatalf("Failed to dial local test server: %v", err)
			}
			defer cc.Close()

			client := testgrpc.NewTestServiceClient(cc)
			// Expand the (addr, count) pairs into the flat weighted list the
			// round robin checker expects.
			var addrDistWant []resolver.Address
			for _, addrAndCount := range test.addressDistributionWant {
				for i := 0; i < addrAndCount.count; i++ {
					addrDistWant = append(addrDistWant, resolver.Address{Addr: addrAndCount.addr})
				}
			}
			if err := roundrobin.CheckWeightedRoundRobinRPCs(ctx, client, addrDistWant); err != nil {
				t.Fatalf("Error in expected round robin: %v", err)
			}
		})
	}
}
// TestClientSideFederation tests that federation is supported.
//
// In this test, some xDS responses contain resource names in another authority
// (in the new resource name style):
// - LDS: old style, no authority (default authority)
// - RDS: new style, in a different authority
// - CDS: old style, no authority (default authority)
// - EDS: new style, in a different authority
func (s) TestClientSideFederation(t *testing.T) {
	// Enable xDS federation for the duration of the test.
	oldXDSFederation := envconfig.XDSFederation
	envconfig.XDSFederation = true
	defer func() { envconfig.XDSFederation = oldXDSFederation }()

	// Start a management server as the default authority.
	serverDefaultAuth, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
		t.Fatalf("Failed to spin up the xDS management server: %v", err)
	}
	t.Cleanup(serverDefaultAuth.Stop)

	// Start another management server as the other authority.
	const nonDefaultAuth = "non-default-auth"
	serverAnotherAuth, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
		t.Fatalf("Failed to spin up the xDS management server: %v", err)
	}
	t.Cleanup(serverAnotherAuth.Stop)

	// Create a bootstrap file in a temporary directory.
	nodeID := uuid.New().String()
	bootstrapContents, err := bootstrap.Contents(bootstrap.Options{
		NodeID:                             nodeID,
		ServerURI:                          serverDefaultAuth.Address,
		ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate,
		// Specify the address of the non-default authority.
		Authorities: map[string]string{nonDefaultAuth: serverAnotherAuth.Address},
	})
	if err != nil {
		t.Fatalf("Failed to create bootstrap file: %v", err)
	}

	resolverBuilder := internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error))
	resolver, err := resolverBuilder(bootstrapContents)
	if err != nil {
		t.Fatalf("Failed to create xDS resolver for testing: %v", err)
	}
	server := stubserver.StartTestService(t, nil)
	defer server.Stop()

	const serviceName = "my-service-client-side-xds"
	// LDS is old style name.
	ldsName := serviceName
	// RDS is new style, with the non default authority.
	rdsName := fmt.Sprintf("xdstp://%s/envoy.config.route.v3.RouteConfiguration/%s", nonDefaultAuth, "route-"+serviceName)
	// CDS is old style name.
	cdsName := "cluster-" + serviceName
	// EDS is new style, with the non default authority.
	// NOTE(review): the type segment below says "route.v3" but the resource is
	// a ClusterLoadAssignment (EDS); presumably the name is treated as an
	// opaque string here since the same string is used on both sides — confirm
	// whether this should read envoy.config.endpoint.v3.ClusterLoadAssignment.
	edsName := fmt.Sprintf("xdstp://%s/envoy.config.route.v3.ClusterLoadAssignment/%s", nonDefaultAuth, "endpoints-"+serviceName)

	// Split resources, put LDS/CDS in the default authority, and put RDS/EDS
	// in the other authority.
	resourcesDefault := e2e.UpdateOptions{
		NodeID: nodeID,
		// This has only LDS and CDS.
		Listeners:      []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)},
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	resourcesAnother := e2e.UpdateOptions{
		NodeID: nodeID,
		// This has only RDS and EDS.
		Routes:         []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, cdsName)},
		Endpoints:      []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})},
		SkipValidation: true,
	}

	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	// This has only LDS and CDS.
	if err := serverDefaultAuth.Update(ctx, resourcesDefault); err != nil {
		t.Fatal(err)
	}
	// This has only RDS and EDS.
	if err := serverAnotherAuth.Update(ctx, resourcesAnother); err != nil {
		t.Fatal(err)
	}

	// Create a ClientConn and make a successful RPC.
	cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver))
	if err != nil {
		t.Fatalf("failed to dial local test server: %v", err)
	}
	defer cc.Close()

	client := testgrpc.NewTestServiceClient(cc)
	if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil {
		t.Fatalf("rpc EmptyCall() failed: %v", err)
	}
}
// TestFederation_UnknownAuthorityInDialTarget tests the case where a
// ClientConn is created with a dial target containing an authority which is
// not specified in the bootstrap configuration. The test verifies that RPCs on
// the ClientConn fail with an appropriate error.
func (s) TestFederation_UnknownAuthorityInDialTarget(t *testing.T) {
	// Enable xDS federation for the duration of the test.
	oldXDSFederation := envconfig.XDSFederation
	envconfig.XDSFederation = true
	defer func() { envconfig.XDSFederation = oldXDSFederation }()

	// Setting up the management server is not *really* required for this test
	// case. All we need is a bootstrap configuration which does not contain the
	// authority mentioned in the dial target. But setting up the management
	// server and actually making an RPC ensures that the xDS client is
	// configured properly, and when we dial with an unknown authority in the
	// next step, we can be sure that the error we receive is legitimate.
	managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup1()

	server := stubserver.StartTestService(t, nil)
	defer server.Stop()

	const serviceName = "my-service-client-side-xds"
	resources := e2e.DefaultClientResources(e2e.ResourceParams{
		DialTarget: serviceName,
		NodeID:     nodeID,
		Host:       "localhost",
		Port:       testutils.ParsePort(t, server.Address),
		SecLevel:   e2e.SecurityLevelNone,
	})
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := managementServer.Update(ctx, resources); err != nil {
		t.Fatal(err)
	}

	// Create a ClientConn and make a successful RPC.
	target := fmt.Sprintf("xds:///%s", serviceName)
	cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver))
	if err != nil {
		t.Fatalf("Dialing target %q: %v", target, err)
	}
	defer cc.Close()
	t.Log("Created ClientConn to test service")

	client := testgrpc.NewTestServiceClient(cc)
	if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
		t.Fatalf("EmptyCall() RPC: %v", err)
	}
	t.Log("Successfully performed an EmptyCall RPC")

	// Dialing a target with an authority that is absent from the bootstrap
	// configuration is expected to fail at resolver build time.
	target = fmt.Sprintf("xds://unknown-authority/%s", serviceName)
	t.Logf("Dialing target %q with unknown authority which is expected to fail", target)
	const wantErr = `authority "unknown-authority" is not found in the bootstrap file`
	_, err = grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver))
	if err == nil || !strings.Contains(err.Error(), wantErr) {
		t.Fatalf("grpc.Dial(%q) returned %v, want: %s", target, err, wantErr)
	}
}
+func (s) TestFederation_UnknownAuthorityInReceivedResponse(t *testing.T) { + oldXDSFederation := envconfig.XDSFederation + envconfig.XDSFederation = true + defer func() { envconfig.XDSFederation = oldXDSFederation }() + + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + nodeID := uuid.New().String() + bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, + }) + if err != nil { + t.Fatal(err) + } + + resolverBuilder := internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error)) + resolver, err := resolverBuilder(bootstrapContents) + if err != nil { + t.Fatalf("Creating xDS resolver for testing: %v", err) + } + + // LDS is old style name. + // RDS is new style, with an unknown authority. + const serviceName = "my-service-client-side-xds" + const unknownAuthority = "unknown-authority" + ldsName := serviceName + rdsName := fmt.Sprintf("xdstp://%s/envoy.config.route.v3.RouteConfiguration/%s", unknownAuthority, "route-"+serviceName) + + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, "cluster-"+serviceName)}, + SkipValidation: true, // This update has only LDS and RDS resources. 
+ } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + target := fmt.Sprintf("xds:///%s", serviceName) + cc, err := grpc.Dial(target, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("Dialing target %q: %v", target, err) + } + defer cc.Close() + t.Log("Created ClientConn to test service") + + client := testgrpc.NewTestServiceClient(cc) + _, err = client.EmptyCall(ctx, &testpb.Empty{}) + if err == nil { + t.Fatal("EmptyCall RPC succeeded for target with unknown authority when expected to fail") + } + if got, want := status.Code(err), codes.Unavailable; got != want { + t.Fatalf("EmptyCall RPC returned status code: %v, want %v", got, want) + } + if wantErr := `failed to find authority "unknown-authority"`; !strings.Contains(err.Error(), wantErr) { + t.Fatalf("EmptyCall RPC returned error: %v, want %v", err, wantErr) + } +} diff --git a/test/xds/xds_client_ignore_resource_deletion_test.go b/test/xds/xds_client_ignore_resource_deletion_test.go new file mode 100644 index 000000000000..f5df318a943f --- /dev/null +++ b/test/xds/xds_client_ignore_resource_deletion_test.go @@ -0,0 +1,481 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xds_test + +import ( + "context" + "fmt" + "net" + "sync" + "testing" + "time" + + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds" + + clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +const ( + serviceName = "my-service-xds" + rdsName = "route-" + serviceName + cdsName1 = "cluster1-" + serviceName + cdsName2 = "cluster2-" + serviceName + edsName1 = "eds1-" + serviceName + edsName2 = "eds2-" + serviceName +) + +var ( + // This route configuration resource contains two routes: + // - a route for the EmptyCall rpc, to be sent to cluster1 + // - a route for the UnaryCall rpc, to be sent to cluster2 + defaultRouteConfigWithTwoRoutes = &routepb.RouteConfiguration{ + Name: rdsName, + VirtualHosts: []*routepb.VirtualHost{{ + Domains: []string{serviceName}, + Routes: []*routepb.Route{ + { + Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/EmptyCall"}}, + Action: &routepb.Route_Route{Route: &routepb.RouteAction{ + ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: cdsName1}, + }}, + }, + { + Match: &routepb.RouteMatch{PathSpecifier: 
&routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/UnaryCall"}}, + Action: &routepb.Route_Route{Route: &routepb.RouteAction{ + ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: cdsName2}, + }}, + }, + }, + }}, + } +) + +// This test runs subtest each for a Listener resource and a Cluster resource deletion +// in the response from the server for the following cases: +// - testResourceDeletionIgnored: When ignore_resource_deletion is set, the +// xDSClient should not delete the resource. +// - testResourceDeletionNotIgnored: When ignore_resource_deletion is unset, +// the xDSClient should delete the resource. +// +// Resource deletion is only applicable to Listener and Cluster resources. +func (s) TestIgnoreResourceDeletionOnClient(t *testing.T) { + server1 := stubserver.StartTestService(t, nil) + t.Cleanup(server1.Stop) + + server2 := stubserver.StartTestService(t, nil) + t.Cleanup(server2.Stop) + + initialResourceOnServer := func(nodeID string) e2e.UpdateOptions { + return e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*routepb.RouteConfiguration{defaultRouteConfigWithTwoRoutes}, + Clusters: []*clusterpb.Cluster{ + e2e.DefaultCluster(cdsName1, edsName1, e2e.SecurityLevelNone), + e2e.DefaultCluster(cdsName2, edsName2, e2e.SecurityLevelNone), + }, + Endpoints: []*endpointpb.ClusterLoadAssignment{ + e2e.DefaultEndpoint(edsName1, "localhost", []uint32{testutils.ParsePort(t, server1.Address)}), + e2e.DefaultEndpoint(edsName2, "localhost", []uint32{testutils.ParsePort(t, server2.Address)}), + }, + SkipValidation: true, + } + } + + tests := []struct { + name string + updateResource func(r *e2e.UpdateOptions) + }{ + { + name: "listener", + updateResource: func(r *e2e.UpdateOptions) { + r.Listeners = nil + }, + }, + { + name: "cluster", + updateResource: func(r *e2e.UpdateOptions) { + r.Clusters = nil + }, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s 
resource deletion ignored", test.name), func(t *testing.T) { + testResourceDeletionIgnored(t, initialResourceOnServer, test.updateResource) + }) + t.Run(fmt.Sprintf("%s resource deletion not ignored", test.name), func(t *testing.T) { + testResourceDeletionNotIgnored(t, initialResourceOnServer, test.updateResource) + }) + } +} + +// This subtest tests the scenario where the bootstrap config has "ignore_resource_deletion" +// set in "server_features" field. This subtest verifies that the resource was +// not deleted by the xDSClient when a resource is missing the xDS response and +// RPCs continue to succeed. +func testResourceDeletionIgnored(t *testing.T, initialResource func(string) e2e.UpdateOptions, updateResource func(r *e2e.UpdateOptions)) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + t.Cleanup(cancel) + mgmtServer := startManagementServer(t) + nodeID := uuid.New().String() + bs := generateBootstrapContents(t, mgmtServer.Address, true, nodeID) + xdsR := xdsResolverBuilder(t, bs) + resources := initialResource(nodeID) + + // Update the management server with initial resources setup. + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR)) + if err != nil { + t.Fatalf("Failed to dial local test server: %v.", err) + } + t.Cleanup(func() { cc.Close() }) + + if err := verifyRPCtoAllEndpoints(cc); err != nil { + t.Fatal(err) + } + + // Mutate resource and update on the server. + updateResource(&resources) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Make an RPC every 50ms for the next 500ms. This is to ensure that the + // updated resource is received from the management server and is processed by + // gRPC. Since resource deletions are ignored by the xDS client, we expect RPCs + // to all endpoints to keep succeeding. 
+ timer := time.NewTimer(500 * time.Millisecond) + ticker := time.NewTicker(50 * time.Millisecond) + t.Cleanup(ticker.Stop) + for { + if err := verifyRPCtoAllEndpoints(cc); err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + return + case <-timer.C: + return + case <-ticker.C: + } + } +} + +// This subtest tests the scenario where the bootstrap config has "ignore_resource_deletion" +// not set in "server_features" field. This subtest verifies that the resource was +// deleted by the xDSClient when a resource is missing the xDS response and subsequent +// RPCs fail. +func testResourceDeletionNotIgnored(t *testing.T, initialResource func(string) e2e.UpdateOptions, updateResource func(r *e2e.UpdateOptions)) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout*1000) + t.Cleanup(cancel) + mgmtServer := startManagementServer(t) + nodeID := uuid.New().String() + bs := generateBootstrapContents(t, mgmtServer.Address, false, nodeID) + xdsR := xdsResolverBuilder(t, bs) + resources := initialResource(nodeID) + + // Update the management server with initial resources setup. + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + t.Cleanup(func() { cc.Close() }) + + if err := verifyRPCtoAllEndpoints(cc); err != nil { + t.Fatal(err) + } + + // Mutate resource and update on the server. + updateResource(&resources) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Spin up go routines to verify RPCs fail after the update. 
+ client := testgrpc.NewTestServiceClient(cc) + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + defer wg.Done() + for ; ctx.Err() == nil; <-time.After(10 * time.Millisecond) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + return + } + } + }() + go func() { + defer wg.Done() + for ; ctx.Err() == nil; <-time.After(10 * time.Millisecond) { + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + return + } + } + }() + + wg.Wait() + if ctx.Err() != nil { + t.Fatal("Context expired before RPCs failed.") + } +} + +// This helper creates a management server for the test. +func startManagementServer(t *testing.T) *e2e.ManagementServer { + t.Helper() + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to start management server: %v", err) + } + t.Cleanup(mgmtServer.Stop) + return mgmtServer +} + +// This helper generates a custom bootstrap config for the test. +func generateBootstrapContents(t *testing.T, serverURI string, ignoreResourceDeletion bool, nodeID string) []byte { + t.Helper() + bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ + NodeID: nodeID, + ServerURI: serverURI, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, + IgnoreResourceDeletion: ignoreResourceDeletion, + }) + if err != nil { + t.Fatal(err) + } + return bootstrapContents +} + +// This helper creates an XDS resolver Builder from the bootstrap config passed +// as parameter. +func xdsResolverBuilder(t *testing.T, bs []byte) resolver.Builder { + t.Helper() + resolverBuilder := internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error)) + xdsR, err := resolverBuilder(bs) + if err != nil { + t.Fatalf("Creating xDS resolver for testing failed for config %q: %v", string(bs), err) + } + return xdsR +} + +// This helper creates an xDS-enabled gRPC server using the listener and the +// bootstrap config passed. 
It then registers the test service on the newly +// created gRPC server and starts serving. +func setupGRPCServerWithModeChangeChannelAndServe(t *testing.T, bootstrapContents []byte, lis net.Listener) chan connectivity.ServingMode { + t.Helper() + updateCh := make(chan connectivity.ServingMode, 1) + + // Create a server option to get notified about serving mode changes. + modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { + t.Logf("Serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + updateCh <- args.Mode + }) + server := xds.NewGRPCServer(grpc.Creds(insecure.NewCredentials()), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) + t.Cleanup(server.Stop) + testgrpc.RegisterTestServiceServer(server, &testService{}) + + // Serve. + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + return updateCh +} + +// This helper creates a new TCP listener. This helper also uses this listener to +// create a resource update with a listener resource. This helper returns the +// resource update and the TCP listener. +func resourceWithListenerForGRPCServer(t *testing.T, nodeID string) (e2e.UpdateOptions, net.Listener) { + t.Helper() + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + t.Cleanup(func() { lis.Close() }) + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("Failed to retrieve host and port of listener at %q: %v", lis.Addr(), err) + } + listener := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*listenerpb.Listener{listener}, + } + return resources, lis +} + +// This test creates a gRPC server which provides server-side xDS functionality +// by talking to a custom management server. 
This tests the scenario where bootstrap +// config with "server_features" includes "ignore_resource_deletion". In which +// case, when the listener resource is deleted on the management server, the gRPC +// server should continue to serve RPCs. +func (s) TestListenerResourceDeletionOnServerIgnored(t *testing.T) { + mgmtServer := startManagementServer(t) + nodeID := uuid.New().String() + bs := generateBootstrapContents(t, mgmtServer.Address, true, nodeID) + xdsR := xdsResolverBuilder(t, bs) + resources, lis := resourceWithListenerForGRPCServer(t, nodeID) + modeChangeCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for the server to update to ServingModeServing mode. + select { + case <-ctx.Done(): + t.Fatal("Test timed out waiting for a server to change to ServingModeServing.") + case mode := <-modeChangeCh: + if mode != connectivity.ServingModeServing { + t.Fatalf("Server switched to mode %v, want %v", mode, connectivity.ServingModeServing) + } + } + + // Create a ClientConn and make a successful RPCs. + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + if err := verifyRPCtoAllEndpoints(cc); err != nil { + t.Fatal(err) + } + + // Update without a listener resource. + if err := mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*listenerpb.Listener{}, + }); err != nil { + t.Fatal(err) + } + + // Perform RPCs every 100 ms for 1s and verify that the serving mode does not + // change on gRPC server. 
+ timer := time.NewTimer(500 * time.Millisecond) + ticker := time.NewTicker(50 * time.Millisecond) + t.Cleanup(ticker.Stop) + for { + if err := verifyRPCtoAllEndpoints(cc); err != nil { + t.Fatal(err) + } + select { + case <-timer.C: + return + case mode := <-modeChangeCh: + t.Fatalf("Server switched to mode: %v when no switch was expected", mode) + case <-ticker.C: + } + } +} + +// This test creates a gRPC server which provides server-side xDS functionality +// by talking to a custom management server. This tests the scenario where bootstrap +// config with "server_features" does not include "ignore_resource_deletion". In +// which case, when the listener resource is deleted on the management server, the +// gRPC server should stop serving RPCs and switch mode to ServingModeNotServing. +func (s) TestListenerResourceDeletionOnServerNotIgnored(t *testing.T) { + mgmtServer := startManagementServer(t) + nodeID := uuid.New().String() + bs := generateBootstrapContents(t, mgmtServer.Address, false, nodeID) + xdsR := xdsResolverBuilder(t, bs) + resources, lis := resourceWithListenerForGRPCServer(t, nodeID) + updateCh := setupGRPCServerWithModeChangeChannelAndServe(t, bs, lis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for the listener to move to "serving" mode. + select { + case <-ctx.Done(): + t.Fatal("Test timed out waiting for a mode change update.") + case mode := <-updateCh: + if mode != connectivity.ServingModeServing { + t.Fatalf("Listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + } + } + + // Create a ClientConn and make a successful RPCs. 
+ cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(xdsR)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + if err := verifyRPCtoAllEndpoints(cc); err != nil { + t.Fatal(err) + } + + if err := mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*listenerpb.Listener{}, // empty listener resource + }); err != nil { + t.Fatal(err) + } + + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh: + if mode != connectivity.ServingModeNotServing { + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeNotServing) + } + } +} + +// This helper makes both UnaryCall and EmptyCall RPCs using the ClientConn that +// is passed to this function. This helper panics for any failed RPCs. +func verifyRPCtoAllEndpoints(cc grpc.ClientConnInterface) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + return fmt.Errorf("rpc EmptyCall() failed: %v", err) + } + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); err != nil { + return fmt.Errorf("rpc UnaryCall() failed: %v", err) + } + return nil +} diff --git a/test/xds/xds_client_integration_test.go b/test/xds/xds_client_integration_test.go new file mode 100644 index 000000000000..e03c937f816e --- /dev/null +++ b/test/xds/xds_client_integration_test.go @@ -0,0 +1,83 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. +) + +func (s) TestClientSideXDS(t *testing.T) { + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: testutils.ParsePort(t, server.Address), + SecLevel: e2e.SecurityLevelNone, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. 
+ cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} diff --git a/test/xds/xds_client_outlier_detection_test.go b/test/xds/xds_client_outlier_detection_test.go new file mode 100644 index 000000000000..d91b35a883aa --- /dev/null +++ b/test/xds/xds_client_outlier_detection_test.go @@ -0,0 +1,325 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xds_test + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +// TestOutlierDetection_NoopConfig tests the scenario where the Outlier +// Detection feature is enabled on the gRPC client, but it receives no Outlier +// Detection configuration from the management server. This should result in a +// no-op Outlier Detection configuration being used to configure the Outlier +// Detection balancer. This test verifies that an RPC is able to proceed +// normally with this configuration. 
+func (s) TestOutlierDetection_NoopConfig(t *testing.T) { + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + server := &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil }, + } + server.StartServer() + t.Logf("Started test service backend at %q", server.Address) + defer server.Stop() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: testutils.ParsePort(t, server.Address), + SecLevel: e2e.SecurityLevelNone, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} + +// clientResourcesMultipleBackendsAndOD returns xDS resources which correspond +// to multiple upstreams, corresponding different backends listening on +// different localhost:port combinations. The resources also configure an +// Outlier Detection Balancer configured through the passed in Outlier Detection +// proto. 
+func clientResourcesMultipleBackendsAndOD(params e2e.ResourceParams, ports []uint32, od *v3clusterpb.OutlierDetection) e2e.UpdateOptions { + routeConfigName := "route-" + params.DialTarget + clusterName := "cluster-" + params.DialTarget + endpointsName := "endpoints-" + params.DialTarget + return e2e.UpdateOptions{ + NodeID: params.NodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(params.DialTarget, routeConfigName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(routeConfigName, params.DialTarget, clusterName)}, + Clusters: []*v3clusterpb.Cluster{clusterWithOutlierDetection(clusterName, endpointsName, params.SecLevel, od)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(endpointsName, params.Host, ports)}, + } +} + +func clusterWithOutlierDetection(clusterName, edsServiceName string, secLevel e2e.SecurityLevel, od *v3clusterpb.OutlierDetection) *v3clusterpb.Cluster { + cluster := e2e.DefaultCluster(clusterName, edsServiceName, secLevel) + cluster.OutlierDetection = od + return cluster +} + +// checkRoundRobinRPCs verifies that EmptyCall RPCs on the given ClientConn, +// connected to a server exposing the test.grpc_testing.TestService, are +// roundrobined across the given backend addresses. +// +// Returns a non-nil error if context deadline expires before RPCs start to get +// roundrobined across the given backends. +func checkRoundRobinRPCs(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error { + wantAddrCount := make(map[string]int) + for _, addr := range addrs { + wantAddrCount[addr.Addr]++ + } + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + // Perform 3 iterations. 
+ var iterations [][]string + for i := 0; i < 3; i++ { + iteration := make([]string, len(addrs)) + for c := 0; c < len(addrs); c++ { + var peer peer.Peer + client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)) + if peer.Addr != nil { + iteration[c] = peer.Addr.String() + } + } + iterations = append(iterations, iteration) + } + // Ensure that the first iteration contains all addresses in addrs. + gotAddrCount := make(map[string]int) + for _, addr := range iterations[0] { + gotAddrCount[addr]++ + } + if diff := cmp.Diff(gotAddrCount, wantAddrCount); diff != "" { + continue + } + // Ensure all three iterations contain the same addresses. + if !cmp.Equal(iterations[0], iterations[1]) || !cmp.Equal(iterations[0], iterations[2]) { + continue + } + return nil + } + return fmt.Errorf("timeout when waiting for roundrobin distribution of RPCs across addresses: %v", addrs) +} + +// TestOutlierDetectionWithOutlier tests the Outlier Detection Balancer e2e. It +// spins up three backends, one which consistently errors, and configures the +// ClientConn using xDS to connect to all three of those backends. The Outlier +// Detection Balancer should eject the connection to the backend which +// constantly errors, causing RPC's to not be routed to that upstream, and only +// be Round Robined across the two healthy upstreams. Other than the intervals +// the unhealthy upstream is ejected, RPC's should regularly round robin across +// all three upstreams. +func (s) TestOutlierDetectionWithOutlier(t *testing.T) { + managementServer, nodeID, _, r, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Working backend 1. + backend1 := stubserver.StartTestService(t, nil) + port1 := testutils.ParsePort(t, backend1.Address) + defer backend1.Stop() + + // Working backend 2. 
+ backend2 := stubserver.StartTestService(t, nil) + port2 := testutils.ParsePort(t, backend2.Address) + defer backend2.Stop() + + // Backend 3 that will always return an error and eventually be ejected. + backend3 := stubserver.StartTestService(t, &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return nil, errors.New("some error") }, + }) + port3 := testutils.ParsePort(t, backend3.Address) + defer backend3.Stop() + + const serviceName = "my-service-client-side-xds" + resources := clientResourcesMultipleBackendsAndOD(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + SecLevel: e2e.SecurityLevelNone, + }, []uint32{port1, port2, port3}, &v3clusterpb.OutlierDetection{ + Interval: &durationpb.Duration{Nanos: 50000000}, // 50ms + BaseEjectionTime: &durationpb.Duration{Seconds: 30}, + MaxEjectionTime: &durationpb.Duration{Seconds: 300}, + MaxEjectionPercent: &wrapperspb.UInt32Value{Value: 1}, + FailurePercentageThreshold: &wrapperspb.UInt32Value{Value: 50}, + EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 100}, + FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 8}, + FailurePercentageMinimumHosts: &wrapperspb.UInt32Value{Value: 3}, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + + fullAddresses := []resolver.Address{ + {Addr: backend1.Address}, + {Addr: backend2.Address}, + {Addr: backend3.Address}, + } + // At first, due to no statistics on each of the backends, the 3 + // upstreams should all be round robined across. 
+ if err = checkRoundRobinRPCs(ctx, client, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + + // The addresses which don't return errors. + okAddresses := []resolver.Address{ + {Addr: backend1.Address}, + {Addr: backend2.Address}, + } + // After calling the three upstreams, one of them constantly errors + // and should eventually be ejected for a period of time. This + // period of time should cause the RPC's to be round robined only + // across the two that are healthy. + if err = checkRoundRobinRPCs(ctx, client, okAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } +} + +// TestOutlierDetectionXDSDefaultOn tests that Outlier Detection is by default +// configured on in the xDS Flow. If the Outlier Detection proto message is +// present with SuccessRateEjection unset, then Outlier Detection should be +// turned on. The test sets up an xDS system with xDS resources with Outlier +// Detection present in the CDS update, but with SuccessRateEjection unset, and +// asserts that Outlier Detection is turned on and ejects upstreams. +func (s) TestOutlierDetectionXDSDefaultOn(t *testing.T) { + managementServer, nodeID, _, r, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Working backend 1. + backend1 := stubserver.StartTestService(t, nil) + port1 := testutils.ParsePort(t, backend1.Address) + defer backend1.Stop() + + // Working backend 2. + backend2 := stubserver.StartTestService(t, nil) + port2 := testutils.ParsePort(t, backend2.Address) + defer backend2.Stop() + + // Backend 3 that will always return an error and will eventually be ejected.
+ backend3 := stubserver.StartTestService(t, &stubserver.StubServer{ + EmptyCallF: func(context.Context, *testpb.Empty) (*testpb.Empty, error) { return nil, errors.New("some error") }, + }) + port3 := testutils.ParsePort(t, backend3.Address) + defer backend3.Stop() + + // Configure CDS resources with Outlier Detection set but + // EnforcingSuccessRate unset. This should cause Outlier Detection to be + // configured with SuccessRateEjection present in configuration, which will + // eventually be populated with its default values along with the knobs set + // as SuccessRate fields in the proto, and thus Outlier Detection should be + // on and actively eject upstreams. + const serviceName = "my-service-client-side-xds" + resources := clientResourcesMultipleBackendsAndOD(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + SecLevel: e2e.SecurityLevelNone, + }, []uint32{port1, port2, port3}, &v3clusterpb.OutlierDetection{ + // Need to set knobs to trigger ejection within the test time frame. + Interval: &durationpb.Duration{Nanos: 50000000}, + // EnforcingSuccessRate set to nil causes the success rate algorithm to be + // turned on.
+ SuccessRateMinimumHosts: &wrapperspb.UInt32Value{Value: 1}, + SuccessRateRequestVolume: &wrapperspb.UInt32Value{Value: 8}, + SuccessRateStdevFactor: &wrapperspb.UInt32Value{Value: 1}, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + + fullAddresses := []resolver.Address{ + {Addr: backend1.Address}, + {Addr: backend2.Address}, + {Addr: backend3.Address}, + } + // At first, due to no statistics on each of the backends, the 3 + // upstreams should all be round robined across. + if err = checkRoundRobinRPCs(ctx, client, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + + // The addresses which don't return errors. + okAddresses := []resolver.Address{ + {Addr: backend1.Address}, + {Addr: backend2.Address}, + } + // After calling the three upstreams, one of them constantly error + // and should eventually be ejected for a period of time. This + // period of time should cause the RPC's to be round robined only + // across the two that are healthy. + if err = checkRoundRobinRPCs(ctx, client, okAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } +} diff --git a/test/xds/xds_client_retry_test.go b/test/xds/xds_client_retry_test.go new file mode 100644 index 000000000000..d7cb7b4bfb3c --- /dev/null +++ b/test/xds/xds_client_retry_test.go @@ -0,0 +1,181 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +func (s) TestClientSideRetry(t *testing.T) { + ctr := 0 + errs := []codes.Code{codes.ResourceExhausted} + + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + server := stubserver.StartTestService(t, &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + defer func() { ctr++ }() + if ctr < len(errs) { + return nil, status.Errorf(errs[ctr], "this should be retried") + } + return &testpb.Empty{}, nil + }, + }) + defer server.Stop() + + const serviceName = "my-service-client-side-xds" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: testutils.ParsePort(t, server.Address), + SecLevel: e2e.SecurityLevelNone, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + if err := 
managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + defer cancel() + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != codes.ResourceExhausted { + t.Fatalf("rpc EmptyCall() = _, %v; want _, ResourceExhausted", err) + } + + testCases := []struct { + name string + vhPolicy *v3routepb.RetryPolicy + routePolicy *v3routepb.RetryPolicy + errs []codes.Code // the errors returned by the server for each RPC + tryAgainErr codes.Code // the error that would be returned if we are still using the old retry policies. + errWant codes.Code + }{{ + name: "virtualHost only, fail", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted,unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 1}, + }, + errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, + routePolicy: nil, + tryAgainErr: codes.ResourceExhausted, + errWant: codes.Unavailable, + }, { + name: "virtualHost only", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted, unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, + routePolicy: nil, + tryAgainErr: codes.Unavailable, + errWant: codes.OK, + }, { + name: "virtualHost+route, fail", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted,unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + routePolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + errs: []codes.Code{codes.ResourceExhausted, codes.Unavailable}, + tryAgainErr: codes.OK, + errWant: 
codes.Unavailable, + }, { + name: "virtualHost+route", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "resource-exhausted", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + routePolicy: &v3routepb.RetryPolicy{ + RetryOn: "unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + errs: []codes.Code{codes.Unavailable}, + tryAgainErr: codes.Unavailable, + errWant: codes.OK, + }, { + name: "virtualHost+route, not enough attempts", + vhPolicy: &v3routepb.RetryPolicy{ + RetryOn: "unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 2}, + }, + routePolicy: &v3routepb.RetryPolicy{ + RetryOn: "unavailable", + NumRetries: &wrapperspb.UInt32Value{Value: 1}, + }, + errs: []codes.Code{codes.Unavailable, codes.Unavailable}, + tryAgainErr: codes.OK, + errWant: codes.Unavailable, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + errs = tc.errs + + // Confirm tryAgainErr is correct before updating resources. + ctr = 0 + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if code := status.Code(err); code != tc.tryAgainErr { + t.Fatalf("with old retry policy: EmptyCall() = _, %v; want _, %v", err, tc.tryAgainErr) + } + + resources.Routes[0].VirtualHosts[0].RetryPolicy = tc.vhPolicy + resources.Routes[0].VirtualHosts[0].Routes[0].GetRoute().RetryPolicy = tc.routePolicy + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + for { + ctr = 0 + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if code := status.Code(err); code == tc.tryAgainErr { + continue + } else if code != tc.errWant { + t.Fatalf("rpc EmptyCall() = _, %v; want _, %v", err, tc.errWant) + } + break + } + }) + } +} diff --git a/test/xds/xds_rls_clusterspecifier_plugin_test.go b/test/xds/xds_rls_clusterspecifier_plugin_test.go new file mode 100644 index 000000000000..bca198081a7c --- /dev/null +++ b/test/xds/xds_rls_clusterspecifier_plugin_test.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/rls" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/protobuf/types/known/durationpb" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + + _ "google.golang.org/grpc/balancer/rls" // Register the RLS Load Balancing policy. +) + +// defaultClientResourcesWithRLSCSP returns a set of resources (LDS, RDS, CDS, EDS) for a +// client to connect to a server with a RLS Load Balancer as a child of Cluster Manager. 
+func defaultClientResourcesWithRLSCSP(lb e2e.LoadBalancingPolicy, params e2e.ResourceParams, rlsProto *rlspb.RouteLookupConfig) e2e.UpdateOptions { + routeConfigName := "route-" + params.DialTarget + clusterName := "cluster-" + params.DialTarget + endpointsName := "endpoints-" + params.DialTarget + return e2e.UpdateOptions{ + NodeID: params.NodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(params.DialTarget, routeConfigName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: routeConfigName, + ListenerName: params.DialTarget, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeClusterSpecifierPlugin, + ClusterSpecifierPluginName: "rls-csp", + ClusterSpecifierPluginConfig: testutils.MarshalAny(&rlspb.RouteLookupClusterSpecifier{ + RouteLookupConfig: rlsProto, + }), + })}, + Clusters: []*v3clusterpb.Cluster{e2e.ClusterResourceWithOptions(e2e.ClusterOptions{ + ClusterName: clusterName, + ServiceName: endpointsName, + Policy: lb, + SecurityLevel: params.SecLevel, + })}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(endpointsName, params.Host, []uint32{params.Port})}, + } +} + +// TestRLSinxDS tests an xDS configured system with an RLS Balancer present. +// +// This test sets up the RLS Balancer using the RLS Cluster Specifier Plugin, +// spins up a test service and has a fake RLS Server correctly respond with a +// target corresponding to this test service. This test asserts an RPC proceeds +// as normal with the RLS Balancer as part of system. 
+func (s) TestRLSinxDS(t *testing.T) { + tests := []struct { + name string + lbPolicy e2e.LoadBalancingPolicy + }{ + { + name: "roundrobin", + lbPolicy: e2e.LoadBalancingPolicyRoundRobin, + }, + { + name: "ringhash", + lbPolicy: e2e.LoadBalancingPolicyRingHash, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testRLSinxDS(t, test.lbPolicy) + }) + } +} + +func testRLSinxDS(t *testing.T, lbPolicy e2e.LoadBalancingPolicy) { + oldRLS := envconfig.XDSRLS + envconfig.XDSRLS = true + internal.RegisterRLSClusterSpecifierPluginForTesting() + defer func() { + envconfig.XDSRLS = oldRLS + internal.UnregisterRLSClusterSpecifierPluginForTesting() + }() + + // Set up all components and configuration necessary - management server, + // xDS resolver, fake RLS Server, and xDS configuration which specifies an + // RLS Balancer that communicates to this set up fake RLS Server. + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + lis := testutils.NewListenerWrapper(t, nil) + rlsServer, rlsRequestCh := rls.SetupFakeRLSServer(t, lis) + rlsProto := &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{{Names: []*rlspb.GrpcKeyBuilder_Name{{Service: "grpc.testing.TestService"}}}}, + LookupService: rlsServer.Address, + LookupServiceTimeout: durationpb.New(defaultTestTimeout), + CacheSizeBytes: 1024, + } + + const serviceName = "my-service-client-side-xds" + resources := defaultClientResourcesWithRLSCSP(lbPolicy, e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: testutils.ParsePort(t, server.Address), + SecLevel: e2e.SecurityLevelNone, + }, rlsProto) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Configure the 
fake RLS Server to set the RLS Balancers child CDS + // Cluster's name as the target for the RPC to use. + rlsServer.SetResponseCallback(func(_ context.Context, req *rlspb.RouteLookupRequest) *rls.RouteLookupResponse { + return &rls.RouteLookupResponse{Resp: &rlspb.RouteLookupResponse{Targets: []string{"cluster-" + serviceName}}} + }) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + // Successfully sending the RPC will require the RLS Load Balancer to + // communicate with the fake RLS Server for information about the target. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // These RLS Verifications makes sure the RLS Load Balancer is actually part + // of the xDS Configured system that correctly sends out RPC. + + // Verify connection is established to RLS Server. + if _, err = lis.NewConnCh.Receive(ctx); err != nil { + t.Fatal("Timeout when waiting for RLS LB policy to create control channel") + } + + // Verify an rls request is sent out to fake RLS Server. + select { + case <-ctx.Done(): + t.Fatalf("Timeout when waiting for an RLS request to be sent out") + case <-rlsRequestCh: + } +} diff --git a/test/xds/xds_security_config_nack_test.go b/test/xds/xds_security_config_nack_test.go new file mode 100644 index 000000000000..1dc3250935bf --- /dev/null +++ b/test/xds/xds_security_config_nack_test.go @@ -0,0 +1,367 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds_test + +import ( + "context" + "fmt" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +func (s) TestUnmarshalListener_WithUpdateValidatorFunc(t *testing.T) { + const ( + serviceName = "my-service-client-side-xds" + missingIdentityProviderInstance = "missing-identity-provider-instance" + missingRootProviderInstance = "missing-root-provider-instance" + ) + + tests := []struct { + name string + securityConfig *v3corepb.TransportSocket + wantErr bool + }{ + { + name: "both identity and root providers are not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingIdentityProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + 
ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingRootProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "only identity provider is not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingIdentityProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ServerSideCertProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "only root provider is not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ServerSideCertProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingRootProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "both identity and root providers are present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: 
testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ServerSideCertProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ServerSideCertProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + // Grab the host and port of the server and create client side xDS + // resources corresponding to it. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + + // Create xDS resources to be consumed on the client side. This + // includes the listener, route configuration, cluster (with + // security configuration) and endpoint resources. + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelMTLS, + }) + + // Create an inbound xDS listener resource for the server side. 
+ inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelMTLS) + for _, fc := range inboundLis.GetFilterChains() { + fc.TransportSocket = test.securityConfig + } + resources.Listeners = append(resources.Listeners, inboundLis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create client-side xDS credentials with an insecure fallback. + creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{FallbackCreds: insecure.NewCredentials()}) + if err != nil { + t.Fatal(err) + } + + // Create a ClientConn with the xds scheme and make an RPC. + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Make a context with a shorter timeout from the top level test + // context for cases where we expect failures. 
+ timeout := defaultTestTimeout + if test.wantErr { + timeout = defaultTestShortTimeout + } + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); (err != nil) != test.wantErr { + t.Fatalf("EmptyCall() returned err: %v, wantErr %v", err, test.wantErr) + } + }) + } +} + +func (s) TestUnmarshalCluster_WithUpdateValidatorFunc(t *testing.T) { + const ( + serviceName = "my-service-client-side-xds" + missingIdentityProviderInstance = "missing-identity-provider-instance" + missingRootProviderInstance = "missing-root-provider-instance" + ) + + tests := []struct { + name string + securityConfig *v3corepb.TransportSocket + wantErr bool + }{ + { + name: "both identity and root providers are not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingIdentityProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingRootProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "only identity provider is not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingIdentityProviderInstance, + }, + 
ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ClientSideCertProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "only root provider is not present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ClientSideCertProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: missingRootProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: true, + }, + { + name: "both identity and root providers are present in bootstrap", + securityConfig: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ClientSideCertProviderInstance, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: e2e.ClientSideCertProviderInstance, + }, + }, + }, + }, + }), + }, + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // SetupManagementServer() sets up a bootstrap file with certificate + // 
provider instance names: `e2e.ServerSideCertProviderInstance` and + // `e2e.ClientSideCertProviderInstance`. + managementServer, nodeID, _, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // This creates a `Cluster` resource with a security config which + // refers to `e2e.ClientSideCertProviderInstance` for both root and + // identity certs. + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: testutils.ParsePort(t, server.Address), + SecLevel: e2e.SecurityLevelMTLS, + }) + resources.Clusters[0].TransportSocket = test.securityConfig + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Make a context with a shorter timeout from the top level test + // context for cases where we expect failures. + timeout := defaultTestTimeout + if test.wantErr { + timeout = defaultTestShortTimeout + } + ctx2, cancel2 := context.WithTimeout(ctx, timeout) + defer cancel2() + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx2, &testpb.Empty{}, grpc.WaitForReady(true)); (err != nil) != test.wantErr { + t.Fatalf("EmptyCall() returned err: %v, wantErr %v", err, test.wantErr) + } + }) + } +} diff --git a/test/xds/xds_server_integration_test.go b/test/xds/xds_server_integration_test.go new file mode 100644 index 000000000000..829843b19c48 --- /dev/null +++ b/test/xds/xds_server_integration_test.go @@ -0,0 +1,370 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds_test + +import ( + "context" + "fmt" + "net" + "strconv" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +type testService struct { + testgrpc.TestServiceServer +} + +func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil +} + +func (*testService) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { + return &testpb.SimpleResponse{}, nil +} + +// setupGRPCServer performs the following: +// - spin up an xDS-enabled gRPC server, configure it with xdsCredentials and +// register the test service on it +// - create a local TCP listener and start serving on it +// +// Returns the following: +// - local listener on which the xDS-enabled gRPC server is serving on +// - cleanup function to be invoked by the tests when done +func setupGRPCServer(t *testing.T, bootstrapContents []byte) (net.Listener, func()) { + t.Helper() + + // Configure xDS credentials to be used on the server-side. 
+ creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a server option to get notified about serving mode changes. We don't + // do anything other than throwing a log entry here. But this is required, + // since the server code emits a log entry at the default level (which is + // ERROR) if no callback is registered for serving mode changes. Our + // testLogger fails the test if there is any log entry at ERROR level. It does + // provide an ExpectError() method, but that takes a string and it would be + // painful to construct the exact error message expected here. Instead this + // works just fine. + modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { + t.Logf("Serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + }) + + // Initialize an xDS-enabled gRPC server and register the stubServer on it. + server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) + testgrpc.RegisterTestServiceServer(server, &testService{}) + + // Create a local listener and pass it to Serve(). 
+ lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + return lis, func() { + server.Stop() + } +} + +func hostPortFromListener(lis net.Listener) (string, uint32, error) { + host, p, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + return "", 0, fmt.Errorf("net.SplitHostPort(%s) failed: %v", lis.Addr().String(), err) + } + port, err := strconv.ParseInt(p, 10, 32) + if err != nil { + return "", 0, fmt.Errorf("strconv.ParseInt(%s, 10, 32) failed: %v", p, err) + } + return host, uint32(port), nil +} + +// TestServerSideXDS_Fallback is an e2e test which verifies xDS credentials +// fallback functionality. +// +// The following sequence of events happen as part of this test: +// - An xDS-enabled gRPC server is created and xDS credentials are configured. +// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS +// credentials are configured. +// - Control plane is configured to not send any security configuration to both +// the client and the server. This results in both of them using the +// configured fallback credentials (which is insecure creds in this case). +func (s) TestServerSideXDS_Fallback(t *testing.T) { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + // Grab the host and port of the server and create client side xDS resources + // corresponding to it. This contains default resources with no security + // configuration in the Cluster resources. 
+ host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-fallback" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + + // Create an inbound xDS listener resource for the server side that does not + // contain any security configuration. This should force the server-side + // xdsCredentials to use fallback. + inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) + resources.Listeners = append(resources.Listeners, inboundLis) + + // Setup the management server with client and server-side resources. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create client-side xDS credentials with an insecure fallback. + creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a ClientConn with the xds scheme and make a successful RPC. + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Errorf("rpc EmptyCall() failed: %v", err) + } +} + +// TestServerSideXDS_FileWatcherCerts is an e2e test which verifies xDS +// credentials with file watcher certificate provider. +// +// The following sequence of events happen as part of this test: +// - An xDS-enabled gRPC server is created and xDS credentials are configured. 
+// - xDS is enabled on the client by the use of the xds:/// scheme, and xDS +// credentials are configured. +// - Control plane is configured to send security configuration to both the +// client and the server, pointing to the file watcher certificate provider. +// We verify both TLS and mTLS scenarios. +func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { + tests := []struct { + name string + secLevel e2e.SecurityLevel + }{ + { + name: "tls", + secLevel: e2e.SecurityLevelTLS, + }, + { + name: "mtls", + secLevel: e2e.SecurityLevelMTLS, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + // Grab the host and port of the server and create client side xDS + // resources corresponding to it. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + + // Create xDS resources to be consumed on the client side. This + // includes the listener, route configuration, cluster (with + // security configuration) and endpoint resources. + serviceName := "my-service-file-watcher-certs-" + test.name + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: test.secLevel, + }) + + // Create an inbound xDS listener resource for the server side that + // contains security configuration pointing to the file watcher + // plugin. + inboundLis := e2e.DefaultServerListener(host, port, test.secLevel) + resources.Listeners = append(resources.Listeners, inboundLis) + + // Setup the management server with client and server resources. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create client-side xDS credentials with an insecure fallback. + creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a ClientConn with the xds scheme and make an RPC. + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(creds), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + }) + } +} + +// TestServerSideXDS_SecurityConfigChange is an e2e test where xDS is enabled on +// the server-side and xdsCredentials are configured for security. The control +// plane initially does not any security configuration. This forces the +// xdsCredentials to use fallback creds, which is this case is insecure creds. +// We verify that a client connecting with TLS creds is not able to successfully +// make an RPC. The control plane then sends a listener resource with security +// configuration pointing to the use of the file_watcher plugin and we verify +// that the same client is now able to successfully make an RPC. +func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + // Grab the host and port of the server and create client side xDS resources + // corresponding to it. 
This contains default resources with no security + // configuration in the Cluster resource. This should force the xDS + // credentials on the client to use its fallback. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-security-config-change" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + + // Create an inbound xDS listener resource for the server side that does not + // contain any security configuration. This should force the xDS credentials + // on server to use its fallback. + inboundLis := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) + resources.Listeners = append(resources.Listeners, inboundLis) + + // Setup the management server with client and server-side resources. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create client-side xDS credentials with an insecure fallback. + xdsCreds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create a ClientConn with the xds scheme and make a successful RPC. + xdsCC, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(xdsCreds), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer xdsCC.Close() + + client := testgrpc.NewTestServiceClient(xdsCC) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Create a ClientConn with TLS creds. 
This should fail since the server is + // using fallback credentials which in this case in insecure creds. + tlsCreds := e2e.CreateClientTLSCredentials(t) + tlsCC, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithTransportCredentials(tlsCreds)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer tlsCC.Close() + + // We don't set 'waitForReady` here since we want this call to failfast. + client = testgrpc.NewTestServiceClient(tlsCC) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { + t.Fatal("rpc EmptyCall() succeeded when expected to fail") + } + + // Switch server and client side resources with ones that contain required + // security configuration for mTLS with a file watcher certificate provider. + resources = e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelMTLS, + }) + inboundLis = e2e.DefaultServerListener(host, port, e2e.SecurityLevelMTLS) + resources.Listeners = append(resources.Listeners, inboundLis) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Make another RPC with `waitForReady` set and expect this to succeed. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} diff --git a/test/xds/xds_server_rbac_test.go b/test/xds/xds_server_rbac_test.go new file mode 100644 index 000000000000..914b59db31c6 --- /dev/null +++ b/test/xds/xds_server_rbac_test.go @@ -0,0 +1,993 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds_test + +import ( + "context" + "encoding/json" + "fmt" + "net" + "strconv" + "strings" + "testing" + + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/authz/audit" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/structpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + rpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + testgrpc 
"google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// TestServerSideXDS_RouteConfiguration is an e2e test which verifies routing +// functionality. The xDS enabled server will be set up with route configuration +// where the route configuration has routes with the correct routing actions +// (NonForwardingAction), and the RPC's matching those routes should proceed as +// normal. +func (s) TestServerSideXDS_RouteConfiguration(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-fallback" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + + // Create an inbound xDS listener resource with route configuration which + // selectively will allow RPC's through or not. This will test routing in + // xds(Unary|Stream)Interceptors. + vhs := []*v3routepb.VirtualHost{ + // Virtual host that will never be matched to test Virtual Host selection. + { + Domains: []string{"this will not match*"}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }, + }, + }, + // This Virtual Host will actually get matched to. + { + Domains: []string{"*"}, + Routes: []*v3routepb.Route{ + // A routing rule that can be selectively triggered based on properties about incoming RPC. 
+ { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/EmptyCall"}, + // "Fully-qualified RPC method name with leading slash. Same as :path header". + }, + // Correct Action, so RPC's that match this route should proceed to interceptor processing. + Action: &v3routepb.Route_NonForwardingAction{}, + }, + // This routing rule is matched the same way as the one above, + // except has an incorrect action for the server side. However, + // since routing chooses the first route which matches an + // incoming RPC, this should never get invoked (iteration + // through this route slice is deterministic). + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/EmptyCall"}, + // "Fully-qualified RPC method name with leading slash. Same as :path header". + }, + // Incorrect Action, so RPC's that match this route should get denied. + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: ""}}, + }, + }, + // Another routing rule that can be selectively triggered based on incoming RPC. + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/UnaryCall"}, + }, + // Wrong action (!Non_Forwarding_Action) so RPC's that match this route should get denied. + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: ""}}, + }, + }, + // Another routing rule that can be selectively triggered based on incoming RPC. + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/grpc.testing.TestService/StreamingInputCall"}, + }, + // Wrong action (!Non_Forwarding_Action) so RPC's that match this route should get denied. 
+ Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: ""}}, + }, + }, + // Not matching route, this is be able to get invoked logically (i.e. doesn't have to match the Route configurations above). + }}, + } + inboundLis := &v3listenerpb.Listener{ + Name: fmt.Sprintf(e2e.ServerListenerResourceNameTemplate, net.JoinHostPort(host, strconv.Itoa(int(port)))), + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: port, + }, + }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "v4-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("router", &v3routerpb.Router{})}, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: vhs, + }, + }, + }), + }, + }, + }, + }, + { + Name: "v6-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: 
uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("router", &v3routerpb.Router{})}, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: vhs, + }, + }, + }), + }, + }, + }, + }, + }, + } + resources.Listeners = append(resources.Listeners, inboundLis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Setup the management server with client and server-side resources. + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + + // This Empty Call should match to a route with a correct action + // (NonForwardingAction). Thus, this RPC should proceed as normal. There is + // a routing rule that this RPC would match to that has an incorrect action, + // but the server should only use the first route matched to with the + // correct action. + if _, err = client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // This Unary Call should match to a route with an incorrect action. Thus, + // this RPC should not go through as per A36, and this call should receive + // an error with codes.Unavailable. 
+ if _, err = client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.Unavailable { + t.Fatalf("client.UnaryCall() = _, %v, want _, error code %s", err, codes.Unavailable) + } + + // This Streaming Call should match to a route with an incorrect action. + // Thus, this RPC should not go through as per A36, and this call should + // receive an error with codes.Unavailable. + stream, err := client.StreamingInputCall(ctx) + if err != nil { + t.Fatalf("StreamingInputCall(_) = _, %v, want ", err) + } + if _, err = stream.CloseAndRecv(); status.Code(err) != codes.Unavailable || !strings.Contains(err.Error(), "the incoming RPC matched to a route that was not of action type non forwarding") { + t.Fatalf("streaming RPC should have been denied") + } + + // This Full Duplex should not match to a route, and thus should return an + // error and not proceed. + dStream, err := client.FullDuplexCall(ctx) + if err != nil { + t.Fatalf("FullDuplexCall(_) = _, %v, want ", err) + } + if _, err = dStream.Recv(); status.Code(err) != codes.Unavailable || !strings.Contains(err.Error(), "the incoming RPC did not match a configured Route") { + t.Fatalf("streaming RPC should have been denied") + } +} + +// serverListenerWithRBACHTTPFilters returns an xds Listener resource with HTTP Filters defined in the HCM, and a route +// configuration that always matches to a route and a VH. +func serverListenerWithRBACHTTPFilters(host string, port uint32, rbacCfg *rpb.RBAC) *v3listenerpb.Listener { + // Rather than declare typed config inline, take a HCM proto and append the + // RBAC Filters to it. 
+ hcm := &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"*"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}, + // This tests override parsing + building when RBAC Filter + // passed both normal and override config. + TypedPerFilterConfig: map[string]*anypb.Any{ + "rbac": testutils.MarshalAny(&rpb.RBACPerRoute{Rbac: rbacCfg}), + }, + }}}, + }, + } + hcm.HttpFilters = nil + hcm.HttpFilters = append(hcm.HttpFilters, e2e.HTTPFilter("rbac", rbacCfg)) + hcm.HttpFilters = append(hcm.HttpFilters, e2e.RouterHTTPFilter) + + return &v3listenerpb.Listener{ + Name: fmt.Sprintf(e2e.ServerListenerResourceNameTemplate, net.JoinHostPort(host, strconv.Itoa(int(port)))), + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: port, + }, + }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "v4-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(hcm), + }, + }, + }, + }, + { + Name: "v6-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: 
"::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(hcm), + }, + }, + }, + }, + }, + } +} + +// TestRBACHTTPFilter tests the xds configured RBAC HTTP Filter. It sets up the +// full end to end flow, and makes sure certain RPC's are successful and proceed +// as normal and certain RPC's are denied by the RBAC HTTP Filter which gets +// called by hooked xds interceptors. +func (s) TestRBACHTTPFilter(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + internal.RegisterRBACHTTPFilterForTesting() + defer internal.UnregisterRBACHTTPFilterForTesting() + tests := []struct { + name string + rbacCfg *rpb.RBAC + wantStatusEmptyCall codes.Code + wantStatusUnaryCall codes.Code + wantAuthzOutcomes map[bool]int + eventContent *audit.Event + }{ + // This test tests an RBAC HTTP Filter which is configured to allow any RPC. + // Any RPC passing through this RBAC HTTP Filter should proceed as normal. 
+ { + name: "allow-anything", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + AuditLoggingOptions: &v3rbacpb.RBAC_AuditLoggingOptions{ + AuditCondition: v3rbacpb.RBAC_AuditLoggingOptions_ON_ALLOW, + LoggerConfigs: []*v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConfig{ + { + AuditLogger: &v3corepb.TypedExtensionConfig{ + Name: "stat_logger", + TypedConfig: createXDSTypedStruct(t, map[string]interface{}{}, "stat_logger"), + }, + IsOptional: false, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.OK, + wantStatusUnaryCall: codes.OK, + wantAuthzOutcomes: map[bool]int{true: 2, false: 0}, + // TODO(gtcooke94) add policy name (RBAC filter name) once + // https://github.com/grpc/grpc-go/pull/6327 is merged. + eventContent: &audit.Event{ + FullMethodName: "/grpc.testing.TestService/UnaryCall", + MatchedRule: "anyone", + Authorized: true, + }, + }, + // This test tests an RBAC HTTP Filter which is configured to allow only + // RPC's with certain paths ("UnaryCall"). Only unary calls passing + // through this RBAC HTTP Filter should proceed as normal, and any + // others should be denied. 
+ { + name: "allow-certain-path", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "certain-path": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_UrlPath{UrlPath: &v3matcherpb.PathMatcher{Rule: &v3matcherpb.PathMatcher_Path{Path: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "/grpc.testing.TestService/UnaryCall"}}}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.PermissionDenied, + wantStatusUnaryCall: codes.OK, + }, + // This test that a RBAC Config with nil rules means that every RPC is + // allowed. This maps to the line "If absent, no enforcing RBAC policy + // will be applied" from the RBAC Proto documentation for the Rules + // field. + { + name: "absent-rules", + rbacCfg: &rpb.RBAC{ + Rules: nil, + }, + wantStatusEmptyCall: codes.OK, + wantStatusUnaryCall: codes.OK, + }, + // The two tests below test that configuring the xDS RBAC HTTP Filter + // with :authority and host header matchers end up being logically + // equivalent. This represents functionality from this line in A41 - + // "As documented for HeaderMatcher, Envoy aliases :authority and Host + // in its header map implementation, so they should be treated + // equivalent for the RBAC matchers; there must be no behavior change + // depending on which of the two header names is used in the RBAC + // policy." + + // This test tests an xDS RBAC Filter with an :authority header matcher. 
+ { + name: "match-on-authority", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "match-on-authority": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":authority", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "my-service-fallback"}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.OK, + wantStatusUnaryCall: codes.OK, + }, + // This test tests that configuring an xDS RBAC Filter with a host + // header matcher has the same behavior as if it was configured with + // :authority. Since host and authority are aliased, this should still + // continue to match on incoming RPC's :authority, just as the test + // above. + { + name: "match-on-host", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "match-on-authority": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: "host", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "my-service-fallback"}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.OK, + wantStatusUnaryCall: codes.OK, + }, + // This test tests that the RBAC HTTP Filter hard codes the :method + // header to POST. Since the RBAC Configuration says to deny every RPC + // with a method :POST, every RPC tried should be denied. 
+ { + name: "deny-post", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_DENY, + Policies: map[string]*v3rbacpb.Policy{ + "post-method": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "POST"}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.PermissionDenied, + wantStatusUnaryCall: codes.PermissionDenied, + }, + // This test tests that RBAC ignores the TE: trailers header (which is + // hardcoded in http2_client.go for every RPC). Since the RBAC + // Configuration says to only ALLOW RPC's with a TE: Trailers, every RPC + // tried should be denied. + { + name: "allow-only-te", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_ALLOW, + Policies: map[string]*v3rbacpb.Policy{ + "post-method": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Header{Header: &v3routepb.HeaderMatcher{Name: "TE", HeaderMatchSpecifier: &v3routepb.HeaderMatcher_ExactMatch{ExactMatch: "trailers"}}}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.PermissionDenied, + wantStatusUnaryCall: codes.PermissionDenied, + }, + // This test tests that an RBAC Config with Action.LOG configured allows + // every RPC through. This maps to the line "At this time, if the + // RBAC.action is Action.LOG then the policy will be completely ignored, + // as if RBAC was not configurated." 
from A41 + { + name: "action-log", + rbacCfg: &rpb.RBAC{ + Rules: &v3rbacpb.RBAC{ + Action: v3rbacpb.RBAC_LOG, + Policies: map[string]*v3rbacpb.Policy{ + "anyone": { + Permissions: []*v3rbacpb.Permission{ + {Rule: &v3rbacpb.Permission_Any{Any: true}}, + }, + Principals: []*v3rbacpb.Principal{ + {Identifier: &v3rbacpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }, + wantStatusEmptyCall: codes.OK, + wantStatusUnaryCall: codes.OK, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + func() { + lb := &loggerBuilder{ + authzDecisionStat: map[bool]int{true: 0, false: 0}, + lastEvent: &audit.Event{}, + } + audit.RegisterLoggerBuilder(lb) + + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-fallback" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + inboundLis := serverListenerWithRBACHTTPFilters(host, port, test.rbacCfg) + resources.Listeners = append(resources.Listeners, inboundLis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Setup the management server with client and server-side resources. 
+ if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); status.Code(err) != test.wantStatusEmptyCall { + t.Fatalf("EmptyCall() returned err with status: %v, wantStatusEmptyCall: %v", status.Code(err), test.wantStatusEmptyCall) + } + + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != test.wantStatusUnaryCall { + t.Fatalf("UnaryCall() returned err with status: %v, wantStatusUnaryCall: %v", err, test.wantStatusUnaryCall) + } + + // Toggle the RBAC Env variable off, this should disable RBAC and allow any RPC's through (will not go through + // routing or processed by HTTP Filters and thus will never get denied by RBAC). + envconfig.XDSRBAC = false + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { + t.Fatalf("EmptyCall() returned err with status: %v, once RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.OK { + t.Fatalf("UnaryCall() returned err with status: %v, once RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } + // Toggle RBAC back on for next iterations. 
+ envconfig.XDSRBAC = true + + if test.wantAuthzOutcomes != nil { + if diff := cmp.Diff(lb.authzDecisionStat, test.wantAuthzOutcomes); diff != "" { + t.Fatalf("authorization decision do not match\ndiff (-got +want):\n%s", diff) + } + } + if test.eventContent != nil { + if diff := cmp.Diff(lb.lastEvent, test.eventContent); diff != "" { + t.Fatalf("unexpected event\ndiff (-got +want):\n%s", diff) + } + } + }() + }) + } +} + +// serverListenerWithBadRouteConfiguration returns an xds Listener resource with +// a Route Configuration that will never successfully match in order to test +// RBAC Environment variable being toggled on and off. +func serverListenerWithBadRouteConfiguration(host string, port uint32) *v3listenerpb.Listener { + return &v3listenerpb.Listener{ + Name: fmt.Sprintf(e2e.ServerListenerResourceNameTemplate, net.JoinHostPort(host, strconv.Itoa(int(port)))), + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: port, + }, + }, + }, + }, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "v4-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "0.0.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + // Incoming RPC's will try and match to Virtual Hosts 
based on their :authority header. + // Thus, incoming RPC's will never match to a Virtual Host (server side requires matching + // to a VH/Route of type Non Forwarding Action to proceed normally), and all incoming RPC's + // with this route configuration will be denied. + Domains: []string{"will-never-match"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + }), + }, + }, + }, + }, + { + Name: "v6-wildcard", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "::", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(0), + }, + }, + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + // Incoming RPC's will try and match to Virtual Hosts based on their :authority header. + // Thus, incoming RPC's will never match to a Virtual Host (server side requires matching + // to a VH/Route of type Non Forwarding Action to proceed normally), and all incoming RPC's + // with this route configuration will be denied. 
+ Domains: []string{"will-never-match"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + }), + }, + }, + }, + }, + }, + } +} + +func (s) TestRBACToggledOn_WithBadRouteConfiguration(t *testing.T) { + // Turn RBAC support on. + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-fallback" + + // The inbound listener needs a route table that will never match on a VH, + // and thus shouldn't allow incoming RPC's to proceed. + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + // Since RBAC support is turned ON, all the RPC's should get denied with + // status code Unavailable due to not matching to a route of type Non + // Forwarding Action (Route Table not configured properly). + inboundLis := serverListenerWithBadRouteConfiguration(host, port) + resources.Listeners = append(resources.Listeners, inboundLis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Setup the management server with client and server-side resources. 
+ if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.Unavailable { + t.Fatalf("EmptyCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.Unavailable { + t.Fatalf("UnaryCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } +} + +func (s) TestRBACToggledOff_WithBadRouteConfiguration(t *testing.T) { + // Turn RBAC support off. + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = false + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + + managementServer, nodeID, bootstrapContents, resolver, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + lis, cleanup2 := setupGRPCServer(t, bootstrapContents) + defer cleanup2() + + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + const serviceName = "my-service-fallback" + + // The inbound listener needs a route table that will never match on a VH, + // and thus shouldn't allow incoming RPC's to proceed. + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: host, + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + // This bad route configuration shouldn't affect incoming RPC's from + // proceeding as normal, as the configuration shouldn't be parsed due to the + // RBAC Environment variable not being set to true. 
+ inboundLis := serverListenerWithBadRouteConfiguration(host, port) + resources.Listeners = append(resources.Listeners, inboundLis) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Setup the management server with client and server-side resources. + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + cc, err := grpc.DialContext(ctx, fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Code(err) != codes.OK { + t.Fatalf("EmptyCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } + if _, err := client.UnaryCall(ctx, &testpb.SimpleRequest{}); status.Code(err) != codes.OK { + t.Fatalf("UnaryCall() returned err with status: %v, if RBAC is disabled all RPC's should proceed as normal", status.Code(err)) + } +} + +type statAuditLogger struct { + authzDecisionStat map[bool]int // Map to hold counts of authorization decisions + lastEvent *audit.Event // Field to store last received event +} + +func (s *statAuditLogger) Log(event *audit.Event) { + s.authzDecisionStat[event.Authorized]++ + *s.lastEvent = *event +} + +type loggerBuilder struct { + authzDecisionStat map[bool]int + lastEvent *audit.Event +} + +func (loggerBuilder) Name() string { + return "stat_logger" +} + +func (lb *loggerBuilder) Build(audit.LoggerConfig) audit.Logger { + return &statAuditLogger{ + authzDecisionStat: lb.authzDecisionStat, + lastEvent: lb.lastEvent, + } +} + +func (*loggerBuilder) ParseLoggerConfig(config json.RawMessage) (audit.LoggerConfig, error) { + return nil, nil +} + +// This is used when converting a custom config from raw JSON to a TypedStruct. 
+// The TypeURL of the TypeStruct will be "grpc.authz.audit_logging/". +const typeURLPrefix = "grpc.authz.audit_logging/" + +// Builds custom configs for audit logger RBAC protos. +func createXDSTypedStruct(t *testing.T, in map[string]interface{}, name string) *anypb.Any { + t.Helper() + pb, err := structpb.NewStruct(in) + if err != nil { + t.Fatalf("createXDSTypedStruct failed during structpb.NewStruct: %v", err) + } + typedStruct := &v3xdsxdstypepb.TypedStruct{ + TypeUrl: typeURLPrefix + name, + Value: pb, + } + customConfig, err := anypb.New(typedStruct) + if err != nil { + t.Fatalf("createXDSTypedStruct failed during anypb.New: %v", err) + } + return customConfig +} diff --git a/test/xds/xds_server_serving_mode_test.go b/test/xds/xds_server_serving_mode_test.go new file mode 100644 index 000000000000..2247a077e3bb --- /dev/null +++ b/test/xds/xds_server_serving_mode_test.go @@ -0,0 +1,395 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xds_test + +import ( + "context" + "fmt" + "net" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + xdscreds "google.golang.org/grpc/credentials/xds" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// TestServerSideXDS_RedundantUpdateSuppression tests the scenario where the +// control plane sends the same resource update. It verifies that the mode +// change callback is not invoked and client connections to the server are not +// recycled. +func (s) TestServerSideXDS_RedundantUpdateSuppression(t *testing.T) { + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{FallbackCreds: insecure.NewCredentials()}) + if err != nil { + t.Fatal(err) + } + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + updateCh := make(chan connectivity.ServingMode, 1) + + // Create a server option to get notified about serving mode changes. + modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { + t.Logf("serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + updateCh <- args.Mode + }) + + // Initialize an xDS-enabled gRPC server and register the stubServer on it. 
+ server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) + defer server.Stop() + testgrpc.RegisterTestServiceServer(server, &testService{}) + + // Setup the management server to respond with the listener resources. + host, port, err := hostPortFromListener(lis) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + listener := e2e.DefaultServerListener(host, port, e2e.SecurityLevelNone) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listener}, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + // Wait for the listener to move to "serving" mode. + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh: + if mode != connectivity.ServingModeServing { + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + } + } + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + waitForSuccessfulRPC(ctx, t, cc) + + // Start a goroutine to make sure that we do not see any connectivity state + // changes on the client connection. If redundant updates are not + // suppressed, server will recycle client connections. + errCh := make(chan error, 1) + go func() { + prev := connectivity.Ready // We know we are READY since we just did an RPC. 
+ for { + curr := cc.GetState() + if !(curr == connectivity.Ready || curr == connectivity.Idle) { + errCh <- fmt.Errorf("unexpected connectivity state change {%s --> %s} on the client connection", prev, curr) + return + } + if !cc.WaitForStateChange(ctx, curr) { + // Break out of the for loop when the context has been cancelled. + break + } + prev = curr + } + errCh <- nil + }() + + // Update the management server with the same listener resource. This will + // update the resource version though, and should result in the management + // server sending the same resource to the xDS-enabled gRPC server. + if err := managementServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listener}, + }); err != nil { + t.Fatal(err) + } + + // Since redundant resource updates are suppressed, we should not see the + // mode change callback being invoked. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case mode := <-updateCh: + t.Fatalf("unexpected mode change callback with new mode %v", mode) + } + + // Make sure RPCs continue to succeed. + waitForSuccessfulRPC(ctx, t, cc) + + // Cancel the context to ensure that the WaitForStateChange call exits early + // and returns false. + cancel() + if err := <-errCh; err != nil { + t.Fatal(err) + } +} + +// TestServerSideXDS_ServingModeChanges tests the serving mode functionality in +// xDS enabled gRPC servers. It verifies that appropriate mode changes happen in +// the server, and also verifies behavior of clientConns under these modes. +func (s) TestServerSideXDS_ServingModeChanges(t *testing.T) { + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Configure xDS credentials to be used on the server-side. 
+ creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{ + FallbackCreds: insecure.NewCredentials(), + }) + if err != nil { + t.Fatal(err) + } + + // Create two local listeners and pass it to Serve(). + lis1, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + lis2, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + + // Create a couple of channels on which mode updates will be pushed. + updateCh1 := make(chan connectivity.ServingMode, 1) + updateCh2 := make(chan connectivity.ServingMode, 1) + + // Create a server option to get notified about serving mode changes, and + // push the updated mode on the channels created above. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + modeChangeOpt := xds.ServingModeCallback(func(addr net.Addr, args xds.ServingModeChangeArgs) { + t.Logf("serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + switch addr.String() { + case lis1.Addr().String(): + updateCh1 <- args.Mode + case lis2.Addr().String(): + updateCh2 <- args.Mode + default: + t.Errorf("serving mode callback invoked for unknown listener address: %q", addr.String()) + } + }) + + // Initialize an xDS-enabled gRPC server and register the stubServer on it. + server := xds.NewGRPCServer(grpc.Creds(creds), modeChangeOpt, xds.BootstrapContentsForTesting(bootstrapContents)) + defer server.Stop() + testgrpc.RegisterTestServiceServer(server, &testService{}) + + // Setup the management server to respond with server-side Listener + // resources for both listeners. 
+ host1, port1, err := hostPortFromListener(lis1) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + listener1 := e2e.DefaultServerListener(host1, port1, e2e.SecurityLevelNone) + host2, port2, err := hostPortFromListener(lis2) + if err != nil { + t.Fatalf("failed to retrieve host and port of server: %v", err) + } + listener2 := e2e.DefaultServerListener(host2, port2, e2e.SecurityLevelNone) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listener1, listener2}, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + go func() { + if err := server.Serve(lis1); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + go func() { + if err := server.Serve(lis2); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + // Wait for both listeners to move to "serving" mode. + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh1: + if mode != connectivity.ServingModeServing { + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + } + } + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh2: + if mode != connectivity.ServingModeServing { + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + } + } + + // Create a ClientConn to the first listener and make a successful RPCs. + cc1, err := grpc.Dial(lis1.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc1.Close() + waitForSuccessfulRPC(ctx, t, cc1) + + // Create a ClientConn to the second listener and make a successful RPCs. 
+ cc2, err := grpc.Dial(lis2.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc2.Close() + waitForSuccessfulRPC(ctx, t, cc2) + + // Update the management server to remove the second listener resource. This + // should push only the second listener into "not-serving" mode. + if err := managementServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listener1}, + }); err != nil { + t.Fatal(err) + } + + // Wait for lis2 to move to "not-serving" mode. + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh2: + if mode != connectivity.ServingModeNotServing { + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeNotServing) + } + } + + // Make sure RPCs succeed on cc1 and fail on cc2. + waitForSuccessfulRPC(ctx, t, cc1) + waitForFailedRPC(ctx, t, cc2) + + // Update the management server to remove the first listener resource as + // well. This should push the first listener into "not-serving" mode. Second + // listener is already in "not-serving" mode. + if err := managementServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{}, + }); err != nil { + t.Fatal(err) + } + + // Wait for lis1 to move to "not-serving" mode. lis2 was already removed + // from the xdsclient's resource cache. So, lis2's callback will not be + // invoked this time around. + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh1: + if mode != connectivity.ServingModeNotServing { + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeNotServing) + } + } + + // Make sure RPCs fail on both. + waitForFailedRPC(ctx, t, cc1) + waitForFailedRPC(ctx, t, cc2) + + // Make sure new connection attempts to "not-serving" servers fail. 
We use a + // short timeout since we expect this to fail. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := grpc.DialContext(sCtx, lis1.Addr().String(), grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())); err == nil { + t.Fatal("successfully created clientConn to a server in \"not-serving\" state") + } + + // Update the management server with both listener resources. + if err := managementServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listener1, listener2}, + }); err != nil { + t.Fatal(err) + } + + // Wait for both listeners to move to "serving" mode. + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh1: + if mode != connectivity.ServingModeServing { + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + } + } + select { + case <-ctx.Done(): + t.Fatalf("timed out waiting for a mode change update: %v", err) + case mode := <-updateCh2: + if mode != connectivity.ServingModeServing { + t.Fatalf("listener received new mode %v, want %v", mode, connectivity.ServingModeServing) + } + } + + // The clientConns created earlier should be able to make RPCs now. + waitForSuccessfulRPC(ctx, t, cc1) + waitForSuccessfulRPC(ctx, t, cc2) +} + +func waitForSuccessfulRPC(ctx context.Context, t *testing.T, cc *grpc.ClientConn) { + t.Helper() + + c := testgrpc.NewTestServiceClient(cc) + if _, err := c.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} + +func waitForFailedRPC(ctx context.Context, t *testing.T, cc *grpc.ClientConn) { + t.Helper() + + // Attempt one RPC before waiting for the ticker to expire. 
+ c := testgrpc.NewTestServiceClient(cc) + if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil { + return + } + + ticker := time.NewTimer(1 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + t.Fatalf("failure when waiting for RPCs to fail: %v", ctx.Err()) + case <-ticker.C: + if _, err := c.EmptyCall(ctx, &testpb.Empty{}); err != nil { + return + } + } + } +} diff --git a/testdata/x509/README.md b/testdata/x509/README.md index e64a385e5f97..661caf4ac858 100644 --- a/testdata/x509/README.md +++ b/testdata/x509/README.md @@ -3,104 +3,4 @@ gRPC-Go tests. How were these test certs/keys generated ? ------------------------------------------ -0. Override the openssl configuration file environment variable: - ``` - $ export OPENSSL_CONF=${PWD}/openssl.cnf - ``` - -1. Generate a self-signed CA certificate along with its private key: - ``` - $ openssl req -x509 \ - -newkey rsa:4096 \ - -nodes \ - -days 3650 \ - -keyout ca_key.pem \ - -out ca_cert.pem \ - -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-ca/ \ - -config ./openssl.cnf \ - -extensions test_ca - ``` - - To view the CA cert: - ``` - $ openssl x509 -text -noout -in ca_cert.pem - ``` - -2.a Generate a private key for the server: - ``` - $ openssl genrsa -out server_key.pem 4096 - ``` - -2.b Generate a private key for the client: - ``` - $ openssl genrsa -out client_key.pem 4096 - ``` - -3.a Generate a CSR for the server: - ``` - $ openssl req -new \ - -key server_key.pem \ - -days 3650 \ - -out server_csr.pem \ - -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server/ \ - -config ./openssl.cnf \ - -reqexts test_server - ``` - - To view the CSR: - ``` - $ openssl req -text -noout -in server_csr.pem - ``` - -3.b Generate a CSR for the client: - ``` - $ openssl req -new \ - -key client_key.pem \ - -days 3650 \ - -out client_csr.pem \ - -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client/ \ - -config ./openssl.cnf \ - -reqexts test_client - ``` - - To view the CSR: - ``` - $ openssl req -text -noout -in 
client_csr.pem - ``` - -4.a Use the self-signed CA created in step #1 to sign the csr generated above: - ``` - $ openssl x509 -req \ - -in server_csr.pem \ - -CAkey ca_key.pem \ - -CA ca_cert.pem \ - -days 3650 \ - -set_serial 1000 \ - -out server_cert.pem \ - -extfile ./openssl.cnf \ - -extensions test_server - ``` - -4.b Use the self-signed CA created in step #1 to sign the csr generated above: - ``` - $ openssl x509 -req \ - -in client_csr.pem \ - -CAkey ca_key.pem \ - -CA ca_cert.pem \ - -days 3650 \ - -set_serial 1000 \ - -out client_cert.pem \ - -extfile ./openssl.cnf \ - -extensions test_client - ``` - -5.a Verify the `server_cert.pem` is trusted by `ca_cert.pem`: - ``` - $ openssl verify -verbose -CAfile ca_cert.pem server_cert.pem - ``` - -5.b Verify the `client_cert.pem` is trusted by `ca_cert.pem`: - ``` - $ openssl verify -verbose -CAfile ca_cert.pem client_cert.pem - ``` - +Run `./create.sh` diff --git a/testdata/x509/client1_cert.pem b/testdata/x509/client1_cert.pem index 714136918f30..6f82cc3be84f 100644 --- a/testdata/x509/client1_cert.pem +++ b/testdata/x509/client1_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- -MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwUDELMAkGA1UEBhMCVVMx +MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIwMDgwNDAyMDAwMFoXDTMwMDgwMjAyMDAw -MFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIxMTIyMzE4NDI1MVoXDTMxMTIyMTE4NDI1 +MVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBAK3fSafgFyHediP0fonPcc/pH010l2jqryUNsEfr -PhxR//ccr7sBvbcInwvj3NJ9XqF4V4ws9h/QbPMLXg1FBcC/LpYjo6VZoNjuJLt2 -DTG2gGcTEL+4G2w/4ztrrmunLxa53P3URIgMgMYhCTIXK2enVbpy637X8WhPYOrq -w+NXnDaTwT8uLGfMVEAKNvXzf8Ras8OHjgTZJEpkgXVjREhUhOPszrBsyYKnI/f2 
-QSDnvgSJbrkBLFRqluT/ciqccryBWy0qJOStVhha1I2tId+dvJsTgQa/NLBASbsU -LkIIUV375K0raINYeg/kA6MK6YDwcCtrVbQa8fu7drxxBiY3tSoDLVn1FYz7iTJ3 -PvtpwsGAqTEsSW3k7l2MTz3iuqcAgL8tI1CpyacwNPfy7j67mH3akY95sh2nmTVj -rsW2uuFSC/cc00bH+IMVZnztE7+fpgZvU63BVnf9d9TMgDe8kwMwbq7dFi9irr12 -8Szpbdnt028dgsrjpbOgPpMYJehRK2Q0I7+99cLeJa1V5ySeFhf+uhNpW9RDi/qp -TJGAG+rE3qAbVVoD9GrOispNZW7Hby4/q8pkNoafXmilqIf6mOri/88AYOMXbH4X -i8mJgIeN2AjJmEGVPBPM25ZjN+ZurWqfdSasXuiIJmJzw9ExcIEjzAjoMl9CNVVy -c77lAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFEdlWQy5/06l3GTu -rqJTuMgy4JuKMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD -AjANBgkqhkiG9w0BAQUFAAOCAgEAAmzrEprlWBxCQDzFZy5pZVIa1FniD+23qXlV -n0Fhhr0eF2udYR2tfzf0VM9WcBHHoRzX5fwNkGmIWiXAISgamMl4sHHZn6Ig0i9h -k9/fI4bYtrCiOqjYRG6VA8OZSD98bD+NtQEPQneO5F5buL0by4FUugu6Ls0Ovpk0 -yhb2pgKFhbFbMC6ev1AK9IpJZgz2q9/rjkJedGjnu35ze+94tw5Fe1FIIkg24ZQk -C4e4DzSpRz5s51LS+dS5hDGuvglWn7SrwGuGujz8iMQdAJa3WSP5WmjbuUFaD8pH -6afrjAhMZoWgxNubLkypkFUW/3W5JwTLnj5wPhPpBtHX6NQ30/FgN89j7+0Zp064 -i4Ur1ykhHgbdUb/EB28sXs+/CkmfmFx44M68yhoJ5euUzRF5gGmxSgRn3+RVsw2E -ju0YQBVvH8JjAt0XCi9SY+vCpe+EG0uV4HxEO6DDdSslsMkuiAeM7pvZTM3FbZyt -BXpWs/L71OF37ouUbt1TD+C1fsCUovjGi4AE0KXeO1rv4u2mTGfxtOOUFKt2dFDa -E1sjyJm1+WjDgIqNjbubM6zpvNtix0xaOXqg7MAt4OJnKAeRQELgJhe6Rt2ROKGq -Hoy8uIjcA26/lwclj2h7fwiKznlxqfDxVsiwmCTdJJb76w69UQvyIRY3tlJr3c3+ -O4VSONQ= +AQEBBQADggIPADCCAgoCggIBALUoje/J3uPOJ0dapY2s7mGLVPhYRaHyRnJE2/TY +zFOB0IisAF3R7BIDufQrHhk3fh0JazCw95TDD9rxsKEVs6Z50lmDkrg/bjlsniE/ +n+M1JacaLQW7xfh2L+Ei4jvMr101nAsimd6IxFU9m3+2SFbhPBG/GWWJ2ZKqQblz +DVMpNg9FYNmMe45vLevOhdPQBE4cVoAPhI9Je+P4Koslebhor0koUeQVeYdBbCq3 +3dQJPAHjBST6mD9mJI4yVrE3Xso3LO85WROUPhRYQyXhrgU15W6g9qTpMTfkriUe +FYLCtAPU9LBodyvjYLuwoEoyRVsA6Zh/vABteD8Afl552fV9KwN2fRVbTDAxQCp7 +P8gE3/rD1RKv7KBNJ/LrwMu7g4VO+tzYDxWee+eXPQ6M/zRWAb3E0v3UNHsF1ZBl +rlFhEiRShHrXDEKMQwCTSrRjwYajUpZ/Hq2USDgkLepKmTmCaoBfWHPyZwblqSTn +A4DNOh5N23eJyrLnJOPYjzZqEPfX5hDTjFRdVTQxtmYlJ1muwtlNyuwZDImhjO6G +54pPj/bV6gy1+YpIQBemPoXtqqmcRiEVWSV5zAizwRaWf85tqpxb1Tjuj2OpD9le 
+oO4JX0HLjhyQBoKspNohu2I4+s7ex/w92bf76cTpYTbMJqIp37YZmfPVztHVaMl4 +W0xRAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFMRdhhib+RS6IJpQ +zFsaKH1BNbyZMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD +AjANBgkqhkiG9w0BAQsFAAOCAgEAHyQwLSo/UdSoZCKcdeb0DCkeABRMUPIysroj +gQJCe3cAKOT+TxL6Qnp6jzM2/vt2T/lzlRp7RAB1jH3J4wy5re4cbCn1sbII3Rog +Nm4PKcw6hfUET44t1Gk9DsCjgvxEIirFBWVpxfn+YDI916iH1fkNURaMP+yxpQBL +3K4bmxanBiyBUHC8cyChLMD2NwXjOAA4pZFk0ohpmK0YUk4ra3Z3Q30DCH6NZ1ZP +aOMDHrCXU6MLlmPk8yiOnotgjqiYEgi3Bzxd/OHpR41Xo8k6g3UrN2GEQFs17ibQ +CQasxodOar5Vezu6ZKCYk5TaY4lugT34w+qxi8tVF54WY2jtWY5PUmU6ZT2Dw5cn +CQzlPUdEebOc1hltTvsD049/2lZmGlMXk0dykxy51jYAYznf2rb3cnC1vu1Wgi3w +J28xXBYD8AvME9jaJ6g3L+KR+AFCSLqpUsTxvu9zKf6pLrVtOCl+9G69uOK/wono +yMGNeel8rkzwzzr1LNrhmcKHqipkq83vqxIUT/mbpBUKO1ZXVG/TWKS6bpBTc4Pn +hBCIvGOSyoKuEiXnFr6fqLhLskUNcCNl7iOfA9h/MhS5ZufJXhhXu3Wbo/KC/mNh +y+fr1S9AyA+EJaYtJRKAOeewGvXYb881UNXWGCQU1aVNJnujRKFyhd07sEjxsad9 +Bn/aYes= -----END CERTIFICATE----- diff --git a/testdata/x509/client1_key.pem b/testdata/x509/client1_key.pem index b7a3930254fd..6cd652c55435 100644 --- a/testdata/x509/client1_key.pem +++ b/testdata/x509/client1_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEArd9Jp+AXId52I/R+ic9xz+kfTXSXaOqvJQ2wR+s+HFH/9xyv -uwG9twifC+Pc0n1eoXhXjCz2H9Bs8wteDUUFwL8uliOjpVmg2O4ku3YNMbaAZxMQ -v7gbbD/jO2uua6cvFrnc/dREiAyAxiEJMhcrZ6dVunLrftfxaE9g6urD41ecNpPB -Py4sZ8xUQAo29fN/xFqzw4eOBNkkSmSBdWNESFSE4+zOsGzJgqcj9/ZBIOe+BIlu -uQEsVGqW5P9yKpxyvIFbLSok5K1WGFrUja0h3528mxOBBr80sEBJuxQuQghRXfvk -rStog1h6D+QDowrpgPBwK2tVtBrx+7t2vHEGJje1KgMtWfUVjPuJMnc++2nCwYCp -MSxJbeTuXYxPPeK6pwCAvy0jUKnJpzA09/LuPruYfdqRj3myHaeZNWOuxba64VIL -9xzTRsf4gxVmfO0Tv5+mBm9TrcFWd/131MyAN7yTAzBurt0WL2KuvXbxLOlt2e3T -bx2CyuOls6A+kxgl6FErZDQjv731wt4lrVXnJJ4WF/66E2lb1EOL+qlMkYAb6sTe -oBtVWgP0as6Kyk1lbsdvLj+rymQ2hp9eaKWoh/qY6uL/zwBg4xdsfheLyYmAh43Y -CMmYQZU8E8zblmM35m6tap91Jqxe6IgmYnPD0TFwgSPMCOgyX0I1VXJzvuUCAwEA -AQKCAgEAidNL4aUC8TgU0h+HBtrHzxVuWMmpE+OkfmzBZeEV1QEzM8Erk8OnjSVq 
-XdR8QOZcUwa/7z/cwg9Hrck+/qnOC6IA3cbWe8X2eL8dovPLNbMDSbGVP0RDiKWE -DKApHPDjpNIkWZkf0fCHS4b4cRpor7u3exqJjnzCwfraSp1aNiZGkATD1L9XN9iC -mFkAhCpHB3EWulIDw9gUqlvNOy46/FLzHHGkzbkOa2DuZCpyKhFJUPNYL5K8fxYX -EuNirmBhmwe3LLARmqvEaX3mq3+oMEgrL4pgZua+b1AmogM3P+S0CxoXhSW5rRQ/ -fcUzFNUbj7gIUoK85w3M780ELBAz3F0j9cy1/DcidV0T8SAzKVrpiJvvK59XYzzn -3J4JFmAsZ0PYgkPhZyPY6hNysRFapPwJyNC+I1NVRpSNHifMsYNEX5dV4M6Qtmv4 -7QmtvUubpJ+vo75W0DNzQ8Ar4BaBVZ6YzKTW58/Ob9Y1o6knUJv/lElE9RLyJBrn -PgtFMPDjf2FzYaA45+zVtQBDk3rljLatS6WZxWg+qh+4RPQjS6sKzNB7U728oiZj -1PRMbeUGKAZDb6FWTZ5nlvai3Z1VDwmLdBBSACnUWLOhXqmnkWY0q9d3kSGnMih4 -Au1A2sCFhhoowoyEkbbmlvORDSo6jfqdYKxP2rUQV1DJBPepo6kCggEBANQzH//s -CTcB9fMSIpiEUmcFBBineA+uMXDbmNRzeJMoFwSxt7TXQpVqfMOXTaa4aDuCSCNX -VLIf10aP4Myx9A0i+t4A9IAn9Ps+wCu5Yb43XmiEc2PC/AZYuviYfP/rIptTS0o8 -z8zAc1cLdDYBww76DcKdagAQABZQaqPARlGEHAvqmr5fjR0oWfcGeEvzqdv7WbGf -9nyuAWl1ldMmILysW0GRDudFhp5rit6A3uCq7LB5Qb14dGrek5k+y8lnzjp30r0O -9QxUuxZVuvh4ujiDnQI5tVWbhD/jgIUF91Nm/Vw0bZMdcp0iA9r8EGmFaHNi0by7 -rMw/6Pqcxd75qP8CggEBANHC5PZLyZEmt6C/c1ohTIZtf2g5ghXSHARfwMvRTVJ5 -4HksZp/FQSe3080l4yACXDpfxJ6pm5WNFNhy4Xi09wkEIWk+bSOqBgk+DvItgqkY -em3q1EUUdhzIB3OXqWRcpgmc78hLiD33GkCTM9BR6W2Q/5TY7o5ULOjkiDKiVL+r -+juFlXQtUTOak0Mwu1RRDQE6z96N5Ffg2rHxjNu1HxQK7OsSfc/lrwOyqnXaB7kR -7CThI4xpSmtyMq4prxehM1YhKk2rJmT4hW+M636uyxZCBg1Aoqqnoxv0sQTHH6k/ -RU1+ZU38RYLzid2qNBom86RS1fWX60H3CH4EX3AVFBsCggEAFMOv9O4W9MAHXjK/ -GeeQ3K3b+cGheP9VrTJ/4QIvoU7B+d6eGF8cD9zsuoL6wT64TGJyRqsMCaYd/bSk -jcM4G3T50XGMe2HtkgxQ57ZrPx7R6S5U0EVLPh++pAbf7HcI2uQqsOgEeYe3gaQI -SiSf/r4vTIT00269Y3GZDc8J0n439F6Pp+NXvqutKgQDD4OXcoRFAaGikA7C6pvr -/k5z06KWB3N3XuApzSS+4QkBRkDTim1DJpQ76B1BmjRP4rR6tLP29jMZfYxpBkV7 -V0cRCeivG4GkIe1m4o2TjPDJg+rHDhe/RS8TgRbMA8i4nmrEjs3zsiE3RoFWffeL -UUdi5wKCAQB86+rb26rBbSNy8lHKXYZrkI6ODaGxSR4yZKw3NgEsmzTaNV0wzZLO -CqZyyJuJFp7CjQJV04C7AfhmJ5SsBGoSzojvWqQ41ysdGf5gsEXeWpufFnkwYs0s -utvlNW9GO/8OPo525LTQ4naZ+pCjAgVYoT/073SzAuJ0GJYcQZzjQZKXHCkztUFk -0CvfmggWYOaz0si1LB/PTjQwQUC4IBfQIemS3cJbq9gdBayK3zw2NbxDAmnfV11g 
-u/P+0QhbtD8Ujk/ZTZJiE7e0BWLCYWrFaLCd995ob8mt/n3l8IikjO/DBQFj/leP -c2apwpGg+Y2kUUjnKICNGofONOB5qbP9AoIBAQCSKpGUVqnsb0PSqjrhr8B1VFvS -4MZfe0ds6/GrB02D7owHPhPaSJsXhBXVri/ECSx2WripMujbZ3tZH4IPub848PYv -9668O1RxKRkyoknyUn5TO58dhYbp3VO7P7EqfVfqEezyQ8bDfVGxrIbMA+kXJosi -T052e3yNin6Q1r+R3cWCg0dHBGDCCkpKdD861LkYjfyipw+u8c4O+CefTHvd8XV8 -EXGn+NBBAPG42bBsJMa+P/1k9qJbflbUfQy/lPGxMspVD8xwWWeEOJEFTgGmoLWE -cNtabvDCEiQ6+DjBBE2Cl656MjX9uv0Dn830so/PLr6FWK0JSy9sGIRTrPC/ +MIIJKgIBAAKCAgEAtSiN78ne484nR1qljazuYYtU+FhFofJGckTb9NjMU4HQiKwA +XdHsEgO59CseGTd+HQlrMLD3lMMP2vGwoRWzpnnSWYOSuD9uOWyeIT+f4zUlpxot +BbvF+HYv4SLiO8yvXTWcCyKZ3ojEVT2bf7ZIVuE8Eb8ZZYnZkqpBuXMNUyk2D0Vg +2Yx7jm8t686F09AEThxWgA+Ej0l74/gqiyV5uGivSShR5BV5h0FsKrfd1Ak8AeMF +JPqYP2YkjjJWsTdeyjcs7zlZE5Q+FFhDJeGuBTXlbqD2pOkxN+SuJR4VgsK0A9T0 +sGh3K+Ngu7CgSjJFWwDpmH+8AG14PwB+XnnZ9X0rA3Z9FVtMMDFAKns/yATf+sPV +Eq/soE0n8uvAy7uDhU763NgPFZ5755c9Doz/NFYBvcTS/dQ0ewXVkGWuUWESJFKE +etcMQoxDAJNKtGPBhqNSln8erZRIOCQt6kqZOYJqgF9Yc/JnBuWpJOcDgM06Hk3b +d4nKsuck49iPNmoQ99fmENOMVF1VNDG2ZiUnWa7C2U3K7BkMiaGM7obnik+P9tXq +DLX5ikhAF6Y+he2qqZxGIRVZJXnMCLPBFpZ/zm2qnFvVOO6PY6kP2V6g7glfQcuO +HJAGgqyk2iG7Yjj6zt7H/D3Zt/vpxOlhNswmoinfthmZ89XO0dVoyXhbTFECAwEA +AQKCAgEAjtzrijWVy+sQuMm4k1DUMSKzIKJkT4GDoqvBFoc+I4DVVmLmaxaYZ+B+ +bhruwo4rq3R5Ds4QgUWPJGfDllVJ9rhNdYA4XYrQPwL0dV36ljCcf/o5lTLuvbFe +stpStTwG86fKZlGkLIWI53wNPBshUzqOp6QfwB6E8Y/JAxnDYVi3pDVfWlDaQ4pU +GYklqtN6AauBX75dGK6nwDE+Q7uLES2lRjlA03FIBK1IQyv7CTM7GnXQ4cep9x1z +KJx0F4+F9kyq6AE+yRz4FA1C7wXZuYw2YhcYSxcHVH/IAceGyTcIxZjUWqYXjQnk +iD+TONAKN+kxTq01MtUhpfWasqC/i+6QU1eqf5YWpd6GsRKyrGgO02NND/SM6Z3V ++S9og4QAjdUyc8dkN+udd1K1CeYNFbmhrYpF2aS9k/PjDP3L137hDW6Cy+thIjZP +u9OB6ba2yUrbQDlmkCbh0vX+77HKAbT5bj8h9r7MqzNsPsgkaKS8gZ79T/Whr/ft +Xiu+eo/u1jtjwUjNMKGxQ9XiU2UU7QccthHHLcYaiv4eySHXA75h+Sho9cD1Vvs/ +ms1/nbCSuU9TSK0UK/V8YjeDA0eVGtDCX3weIW2ECQ80SoT7uf+fhjaLkvOadb7f +1O9DvYVYZvblxUm8ajOh+/n9VyB/I9R9Q8GdGiauXy16uXLZMdECggEBAPEx+4aR +XfuXmnEoPk3GpvyrRlrMy02aklrATKcxeAYo2uiIcuQv65E3AmE1GHpoxmAKjLty 
+fuUfGdT7f4uGeF6p+IEkW4jQm56UFbCdun9kduEaN9FRylTBqUKWIY2rtRS6nHZ8 +bAkL/6Uv3g9NWx95rV7HnAfC2n6AIvc8LRfQVVqSvjPbsEPvJAT2353D0Rb7vC2M +1hKeBrSNBiy57EKnrMDOhNpBvSBU0Zc+YsBRNAimKyBz7dt35H+THkFaEk9vGtG0 +QkDvngPzSX99Ojwk2mo9jGrh7LHErWih5C73IfvYUh3kyEwbZ5y25i9Z0F37boIG +jHSVvcPp+9x9PNUCggEBAMBHLyhBUAQVZFXtWysr0BjO34XffgkSt1XQa8cVxif7 +glWauUZtjfC7PT/qgY0mx2dI2bDcKiQQCBlVavP1RLRwj3rZv23eit7z13UgHSa6 +3dnsgpO2Zux6qoV48lO4xbuFqZtW+MP+9jthKwr95r8lmZ4cmGQwXXcqNsR7skFt +30Uhcyn+MTfyLwcqt8g9i98rrJmbPAuIME/Sz9DLIi6UxQLI6MeEn92AzECNDp18 +CypOL+sDrLw/7HNHNoSblgm628BHpBgT2qaOYnawRr0gni7MHXOAbDopKYDAtLuU +ZMFjlILdfiSDouhvKtMlZG9arTB0TasdAQJGPz53H40CggEBAJ4JDvJsOzVHb2Vn +ZfNWD0INA0spVqhheDXIPDFsg2UdzdmA1i7XizUZ4xBIVuKV1i1FnFKRwb1ktGtN +4pNMJ4B3RCFx7hvl+6FbDB8uKe2gqRfzMtGPEtCYF8xOTGvkLwEHCM/F1I/U8cuN +YqWKHQOxmTw58+1N6hXq5X4zSqSI1/RBpCiccJEClwo9q+VWUaEKjpEV74pBSslw +gbQ6mihOby3h40CSxFXz3WSI9vFmA38LScS40Qf1NZ21iqRtXQP5G4x93M9pcZLL +DMRhDBAuYYItE91QbONJqAmf0cBII1c9tQhrSCY96pTPbmFmKtX5kb3Whp85Ih7F +KEafNIUCggEBALMnoIDZmjyz0fFeX3wyLotu9kY+n6jEj56dvE6bsy694grxR4Cf +w4lybPeJAX0LjPBnqK5p9bn0VheEx0rYVVPrLUVCbmNo3+wtN6wiaAcWRnAvNtt7 +MRtWkFwc/W2U1GiNeiMLPm8guT1KpFhxiva/igsQic2QYwYNh0o8FzNvtIEtUajm +9+Uw+zCqVON2tUUT5JabVa9JDfrSamAZZZgRdh/KI1sD8BDrWWUsCVojoiOhBnTr +z5730ND4oYudjIc0XF0kY3krxqc6M/Ry+vZt1fW0qhxcpHrsr4cQB1ZgRiELL+1f +g5FyNfBs5HIofRRkYMqtE1FEjRQZcAQ76mECggEAaOUtM9BZuV9gEwmG4hmFfeXq +vJOMvlsDkRRbLuDQ1B8Vw3v7lt1+K+KfBt96MoQe08MyXM7sIMB+hn+zakNaM2W6 +UzTnAPQQAo+wELqj6U3DrV7zw7I1hZTA9G7qxMAQBEmk3u2q4/zWDAcyAx3D9JVj +L3G14pYf0drFLChnknVTPRaF0Q5upLYzCPLMa9w0FLKy6fkfdWdpzyjvW7+JEeFY +koA98hrottqJB2CcqehQDSCUHKKbd5U15y1NV1BQloaPJLwpPAVTkBszQSHanltN +l9POJBJlfQ1eWL88wHdKiLbtOg6PTfAmfghIRxakjHvxBgFO1/xG6Lxm7QwUDQ== -----END RSA PRIVATE KEY----- diff --git a/testdata/x509/client2_cert.pem b/testdata/x509/client2_cert.pem index 6777622f891b..2cffdeafb590 100644 --- a/testdata/x509/client2_cert.pem +++ b/testdata/x509/client2_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- 
-MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwUDELMAkGA1UEBhMCVVMx +MIIFcTCCA1mgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIwMDgwNDAyMDAwMloXDTMwMDgwMjAyMDAw +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIxMTIyMzE4NDI1MloXDTMxMTIyMTE4NDI1 MlowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MjCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBAOBsqW26eD9t1JvEsQH2PjcstaknRoeNi+yqxQ4w -DIWrngdeL9/achgzCx4lCbcdTv0QatGg3manPEXem5qsmR9dKj+EKPXnV2qI6PSI -fv1AwcLma0Ph2F/zMASNP5wkwgv6MaIyYx3n+F4iBGQToUaj1l9XS5E30w7k2VxN -KR7zDOGSKifavuGP2nVT8NKgXUjsh3X9F72ZIPZwvaPYkbmikOshDr8TchSgof6+ -9ng+sSmYt/Vm0yWspjJfk2qJldeIXgGRVCwOl0qBsziEk0HJSpAjjy/u9GcTGz3A -qRQ7wPmoKU5MwnNQKZGE73JRra6zk64YiqWdkg7x2WuE1Dp661bvP9iwC4EgUqXL -ZEQkISsDpT8RkWqp2G5crvyrk/cf8I8TbsPi9Q6Eg3dRkqCN8H1mkZT1assOp+G3 -2F7jOvagZfLik3xoSbvpD+u2vMRe30uPKZBNhEZv2PU2YaSEXu+a5qT328uFK254 -rLFi1DZU0eXlj9Y///nMo5kUoq3z4WcL1rnDRSJk2JZJ6Ln5SXN4lbNuvn7dFjKA -VoQa4texrCSf8jtRKzexhBi28n6LAorJT57E/mo0ZvfL6aJb7cUjbhQZZmC5Kqoa -lMaiEnoPxhMqG4m+n6bfYGLqfZlsDiTVzcgEd+RxGTlhaUIg65ZMGK5982PvV3vr -AeyRAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFHoqiLSZ1N8EEtPo -vmr1u4X0tKyKMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD -AjANBgkqhkiG9w0BAQUFAAOCAgEAZ+NQzQk5L/55+58WYQk81SyFWXLjj3RVO3fB -jUgIaxd87IrVeLKrnfoa9mMaS2Qf3SfEMhovRy6Jb2jfxbG0wLQnhx1bqNtaNLAr -2pGG/Yu+4ZzN4iIloP9dn98tYFkHOLOLfIwNEh4Yg6IB9eg+qcbDg5JlqGkGRSsu -IOS9XD2CY9zQ3KLGlVCWZ1EfW8u+du1GIUMx0DEwEYZ/zYnyTa2bCdBD7aetmbKZ -yjSQ0Ole1W3z1Q3uF8CQjZ2dr/wQ3nmxj5Km9PN7/Q9iHn7RyeypWxt5utzSG5Bf -egL0ER8kmYeKHZeagdRbKWPRyUjEligndLzh8Vi75hGFBDAx/pB0aVf81HEStKKw -WCuL0PKpKIoIqNE8aJ6jTo0OEL+Z+6uam0vSnuVqHkeigbNsmefyR82TmiJYDahM -3CBp6Q5gfw4WKIY/0JuJnN4Ym+zIgv2kKRVHGK3SHhiaCUGt2BydN9MxSjl1/B+v -U7kYVj73MJZHSl96w1mnXXFOevxb7SOP23QmTKfqmU0NakfRMcHcjnG6M5mlnIDg -DjpSJd1TLoCS1SfIyc+Fibd4grsRucnuo0iHuFqV8TZ4hi0qKKE8UG8En5KNiQDl 
-exFgZo6FGQa1mJxQiSfhL3VoyeZ/b3QRG+mNVDmgHsZSTjfMppfmyBJRT4HIlsHS -dWeIeN4= +AQEBBQADggIPADCCAgoCggIBALb7KLFguOBiEHR8FBI/0AFs2X3s7fN5ZCOkTf4p +s9LwAcBWm5/zUqzvZCSui+4sr3qN1b1D+Xbc4xH9+WcxfbeoA2w4d2FKKJ0qaShD +Mu4XTQfj9B7g5GZ+FUeP9rScqgJ+WFeOM6QoAgRrFAS0AMP21TpDue4AVKmD1trX +z3f1DaRBtcUa4zlk2J0GBQDPyPB4worxhZ0IW+OLz2RIl8AWJBDFKMgscxEx239t +GTOY9H6hPI7Py2koknj7LBNc84lf2PVFw5OytQYglmtkFQqntyVxtETAOL4pFOjj +Zw6MAnQBGLS6nhiXG1LkZuvWJn1T5ewhci4qNVpv/8LtrPc2Fv7jb86I5XJvdOGv +hYC6IwS9Psg9oCYaIzyandoSj8SEwXaQuD98ROBUs1raasLedg3d4xNeZCRRmnzE +me+IpHm/wS4hTMxQJHYVewNB68fl6FoyRAqoXNy5yi8uMJKbjqb9E7niQCQRO9vQ +eX0TrB23uoUXbdTz95uMiw8yy/xz11/h59TxN9cOiqDf74tymgH0jTwO8eg/bzKU +zTXxTANcfGDvS1vR+yaZDxNbZ/3A4+NzNF0M/Z7AHgEzUcx4yu2shrXXr3dhNpzb +crk9yoCnEAiP1i54euqaOOrp6O34cRCCBE18j+oEEIIfYdMRXorCPxHRzdHKlJ5K +OfRZAgMBAAGjVzBVMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFCIxr8jA3srbonwd +2AoxV5teDkuzMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggrBgEFBQcD +AjANBgkqhkiG9w0BAQsFAAOCAgEAT4hUMxXeg6cqFyyg8TeStBI3fWtmVKahlsba +Nh1KlZe5ZVTwWKh6ULn2zvSqy0t28wpER8Ky2a/yBxsssvPKGQBUgUUmUOSy0Zzk +ICU+pDJrVtZ02vOPlrx23SpnE3EFs3yXMGO5B0RGScHG62YjHyBDPJH3Gun4CB2W +PpthtYIX2FN5g15T3r6UZy62w7SjUEu5z5Ke7IOiAcnXNOXtozv4J9v3bt2Kg7WB +YS80r6b6cyOhO57jobdRBcdsWWogBXBn+ciaL8z3gLNWPs6YooA/6/95Vq0uV8t3 +xfq0XH1dbcdZOnalSwNEyOgLKxQ/yggOd9ridk9e5cGBBIfw1v1N1qDkOWjcdEoR +qjAjR4pgUa+d92/HNLYG8SqVGqACjUUQM6tigw6tHUbeqpk1iI7vT8Cl6Er6bEYE +tMTWEcWAI7GsqJXl2SJPMsObjBg34aZJnU+xxedMDF+OYZXzYeYk2De7uhXUi3iu +46alockzYqOdN6vE99Y7757C1X3N62PnEUhZN0Ri9D7i1yjQ9t0CCucam6hcqcEH +fcDIsXTQz0l97iztkPhcLd3kzAg2pXopwuHkhd3Ih5So5/m1V0rjHVVtrbSkN8/u +JlHy0/tNsJ1OaJKRqd665M9IhaRrc9KP0lzHoA1ZpUsRKo/Be8gNUaw9EP1CMDny +kKizg2s= -----END CERTIFICATE----- diff --git a/testdata/x509/client2_key.pem b/testdata/x509/client2_key.pem index dc4cca65a312..a9563fbf1427 100644 --- a/testdata/x509/client2_key.pem +++ b/testdata/x509/client2_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEA4Gypbbp4P23Um8SxAfY+Nyy1qSdGh42L7KrFDjAMhaueB14v 
-39pyGDMLHiUJtx1O/RBq0aDeZqc8Rd6bmqyZH10qP4Qo9edXaojo9Ih+/UDBwuZr -Q+HYX/MwBI0/nCTCC/oxojJjHef4XiIEZBOhRqPWX1dLkTfTDuTZXE0pHvMM4ZIq -J9q+4Y/adVPw0qBdSOyHdf0XvZkg9nC9o9iRuaKQ6yEOvxNyFKCh/r72eD6xKZi3 -9WbTJaymMl+TaomV14heAZFULA6XSoGzOISTQclKkCOPL+70ZxMbPcCpFDvA+agp -TkzCc1ApkYTvclGtrrOTrhiKpZ2SDvHZa4TUOnrrVu8/2LALgSBSpctkRCQhKwOl -PxGRaqnYblyu/KuT9x/wjxNuw+L1DoSDd1GSoI3wfWaRlPVqyw6n4bfYXuM69qBl -8uKTfGhJu+kP67a8xF7fS48pkE2ERm/Y9TZhpIRe75rmpPfby4UrbnissWLUNlTR -5eWP1j//+cyjmRSirfPhZwvWucNFImTYlknouflJc3iVs26+ft0WMoBWhBri17Gs -JJ/yO1ErN7GEGLbyfosCislPnsT+ajRm98vpolvtxSNuFBlmYLkqqhqUxqISeg/G -Eyobib6fpt9gYup9mWwOJNXNyAR35HEZOWFpQiDrlkwYrn3zY+9Xe+sB7JECAwEA -AQKCAgA4kiuDRWXaV00olsQnwnKcZeDE6umUcdG7rrBNiz8c0s3a/ZsDyoTIJNXA -m4V/axvmHqVOgkaNicpfsmV279sJVOq5aA8LLW2TpT9TpLSeEhzFjF+tlNh+F0cb -Xp+SNJHVgxPP1vO1LiwlTl3c/DXDILmA/vhFetTxBC7mXWzoKEwu8DFAKpvDMAfZ -W3dxIItjPnxG+a1qVZdBh9nF22mgaaIuIv8cm0I+gN9U374xQVxXJ+/3JBxFeufJ -+t2mFVh4JB/ONVwKXwMz/M24iXK1OpBZFR2a75kcAmzzfAUi3I0gYYtH+YFqn+Ja -lC/nmT82sn2ffQA2DyoqKjysJad5PWHByyepPGA6mkrAwaxn8YFsd0Yu14LaWCfO -5jKQzMvDhuAavAkaeT8EJnQdOeztXHYGV7S8rDQOgXM58W8e9+SchceJzkl1MYKf -99xXveelRaTaGOWBK1E6xPQP7iKJTeh1/Xjk0ylEnWPG5VvjcbNFwleDAnhyDTwB -OqcW2L3IV208MmDEmLuSBAFjHg8u5+/hLnsv+qozAX4yWhITZL67uBufVjKbhTi9 -viFUJ8/yGP9kIrJosQ4iDZgZv1juQLEhAw/W1eIV0gCxy/ZFfxAJXgKThZJWgSAI -FTNf3mKZOiUpuG5+Pe5fFtDa1/vmvQaE5y2lzh8ztLtFboaboQKCAQEA+a4nh2bD -WR6UC/3xQ22/Uwvntw91P18L+HyzNtgKCKKKVpwjWdaK/o9jdnRajK4s/hYKcIND -szaSjnD1vXWezw61aXZgOBai+xGdMWJFbTIRFfFcJqvFwN4cOmURX2NzLn7JPCp2 -y8HUdP0u55n0Ax9/qSkh4Eysxcy9+RMAcJ7LIsqSSlsSY9tQ78QS8ymJeePdf8xl -Ha3rlaGLpoLt/8gfYLjMfpyfUnuWrwRK79aBBKbkG7sdi5Cahnw1ZN1vxdOjpKcu -5/NhJ5OZxU9OSm91uzSkQFfsLe2t1JLnjuvcPASlhMIskhpGof8qCrjct1e7sYeo -UpyVknF7InNGMwKCAQEA5hrbpi7Nny2H4Uu7Z6aIUf3dIuPLl6f9VDkIqRon1HXx -4+1gQWhEwclB18FzFVDv4h4YAGv4upGHYo8DNl8GYrcIpZQ2dxz3QfTH5jl+tKmF -FfHIRKuBJVgXw+nVrE8HzF1M1UTCwCb8SnDg1dV8U5OfJy01LOEnp1sNW64T/pDy -unCnY2+k/ncqGmeWUKL4mbKN8GmfzIGMhwi8yiM3Cdbmk0kETDK/NIwgl+YLX6dt 
-lHe2g5OVoDgVatC8ViVmoQVmuuPASP1K4TPUAtRi0A0BYqPB2O/vFZ1f+yD1sJM7 -kILtz91DPB5v+7txwjD5S558TC1l8L9JCH12R7BWKwKCAQEAz4Z5RImdhM1tsCn6 -BlmJ1LToe7dVdL7DbF35d3RJorO22BYfK+Su0rbLrQE44gVDUE1xj+MKukJ5vfsV -xculm+RV1LqXbwchoB0b0pgjrIcYvGxIc7wCOjRisgafUfGPIu4uxNtmsiUBOdvW -yJmlv5LGwQt3JL+WOzHaFNQ+YV0a6mgE/9iCiI0Z0K/gMEwuACntSPPSd8C/Nzd2 -o4ff2eG0cugm0HXN1vjyXbXrsz1PL1an8oSsIfym83D50ERdSsiGE60Bx7j637JG -9UDdifDqohc3DmQF4obTHQSdgqV4AEq8aIQcF7PPUYaMoyzUB2/cicp/lWqgx3+b -IR8/EQKCAQBtZA9P7agrKEYUwSASooTkFb/vOkQrkN1KEOMhISIWSwv3w32jGqK1 -TaxTmc/QLm4cHRpj+PCCIXUvUbXBP2OVwlYGAXPzJH4XiPsPY/3sfTqbuBnxK2d2 -DW8e4CeIhvm6GhDQwqOjHeWKrib1AUzdnqxmv4MsFs33Lb4n+5Xdy6LZJ30sNINH -xfbqHpzDMPbmepAn3s7tNhlMiMbXge5Eazmqg2fbobRsksFb9S0rCDl7/31xB9R2 -GrNz2E/w1E759ctkxalACcpzTWRZBAcFyWkDL76UF1yd9fcPOBgVHamPhe7whsvT -5NRv5CisnQOnA20r+dkgno9lzd9RLW+JAoIBADJ0vUL2nJZkM6reh4+bDAoRDP3s -U6JNPAmkMvWsiMckm+WKUtUo84VDBSIKX897z5sZ1AfkWS8P9MqyiDbPiJCuuIkq -h9OJIHVEQ8NfmD/sl/3TE+ig0OzIbZUL3sssL1Iadkkn9hNnYIY1nt5QsKsWJ1m7 -u2+6DHTkj0TAM6SGt41TvRQyLS/fGomqmAkqYNuN3jdEGF5cFJoeyhOh/EoMP3RC -LabPAhwUZzIH+JO93Ws5nuKOTPnryDQOM4Ug09aPLaJW5GRmfKVie1iDV6sp7KBI -7OqHcuieCyxXHrFRESmxkMj87DaQ5mTo/q8qoZ1nOZ58vohAjbPvIaQ+vL8= +MIIJKQIBAAKCAgEAtvsosWC44GIQdHwUEj/QAWzZfezt83lkI6RN/imz0vABwFab +n/NSrO9kJK6L7iyveo3VvUP5dtzjEf35ZzF9t6gDbDh3YUoonSppKEMy7hdNB+P0 +HuDkZn4VR4/2tJyqAn5YV44zpCgCBGsUBLQAw/bVOkO57gBUqYPW2tfPd/UNpEG1 +xRrjOWTYnQYFAM/I8HjCivGFnQhb44vPZEiXwBYkEMUoyCxzETHbf20ZM5j0fqE8 +js/LaSiSePssE1zziV/Y9UXDk7K1BiCWa2QVCqe3JXG0RMA4vikU6ONnDowCdAEY +tLqeGJcbUuRm69YmfVPl7CFyLio1Wm//wu2s9zYW/uNvzojlcm904a+FgLojBL0+ +yD2gJhojPJqd2hKPxITBdpC4P3xE4FSzWtpqwt52Dd3jE15kJFGafMSZ74ikeb/B +LiFMzFAkdhV7A0Hrx+XoWjJECqhc3LnKLy4wkpuOpv0TueJAJBE729B5fROsHbe6 +hRdt1PP3m4yLDzLL/HPXX+Hn1PE31w6KoN/vi3KaAfSNPA7x6D9vMpTNNfFMA1x8 +YO9LW9H7JpkPE1tn/cDj43M0XQz9nsAeATNRzHjK7ayGtdevd2E2nNtyuT3KgKcQ +CI/WLnh66po46uno7fhxEIIETXyP6gQQgh9h0xFeisI/EdHN0cqUnko59FkCAwEA +AQKCAgEAtP73H42nEfyufiqFyA9q9x3ufMsyDFYVIdRSeYhSoeJaOSDyS2NqcjlR 
++57UN0HoSfemZtKoHlUcHx3z54li65m72P55x7iNN/lNj0/5Pt25ioaHYUvfYSpy +bhkPVVRqLpE/XUwB9OzGIgyw/n33C+BKxplbfvrAw/TvQAWc6PFzDvkYjeGsxYbl +ZV0g8c6W2pb5CGsjWVN9YTVYbcAIqy67egMr9eVR5L5GemM2PH2dyuw+dJ1CfcBu +MlFxJa4aD9bJSsQ5Uw3AVlFBuPSEg8emN9mjESZ6ek80qbDWreL8QjcbcxntbDF8 +C6B11e48oFeu5MWopdWGdPC4Mt7a6Pjjy/ESGHcKqiDPP0VdcEgKpmowXI2CtXfz +k9bbIAoveMgFThX4eb/d5DzYXID9MkSd3GdZXMa2Z2xqX3/S4dujWKda0VlN61vd +3sX0Xd6Wno91vobjFx3tqhqumKpZ/1MjNDqzB6v0lRzxaiFPT5/h6hTIzGKslzvJ +H9bTUyoocXo6Xuskw5FHcM3VFriJljfFOi09eqVvldSvaBosYc6MIRuw8zuGuSon +q9ZBIYDgdnfuXhMh2cohEUOHoi692FgGsC651rl/bgHncx3q7IS995vz8NzmZF2V +dpN9q4v6nwBuePQs23s2MAEF5REyeOR/eA7gWbtGASnZMfmyk6kCggEBAOMGYgAZ +JMr3dY0bZsp2hdS2quHau2mUIEvV4FMvu/FLLiFq0JMHNe3vlmZrTLR/JW9TfK00 +ymPXEc/v01raJDeG2Y7I0086v65tDmdHyEE30MLEYNww2XsPqZIoacJNEvr/Jmki +O/nSaUszxJ4ygOGA6u7oJi1YJ+l2Oe8kQ6UMxoDHSDO0Y2Fjhg+TKCF0+a0Y9ddP +Q8k0B3MOXUcUuGk5ZtnXXJbDBq2Fvpf/pGsCp9twESyu/nbGSGKAClH3hfxXNU/C +jBUbX9Jyxgw5ZqWqRIt7O5NBliav3MClKKKVBYWQiju0SjV4GC2s7kF3lfohFq6R +ltGgn0pxaXsLqbMCggEBAM5Vubx62pQ9O36FPCp4yLCzMeGuuTA0Wq8LoC1OtBvq +OtHjKZdmx+Pe4W224iyceK1hEYNd18Byv1w/FSJPR5U9W+jk/GYTQ1WlMosAeGYG +fNvuLCJUDxO1cDimRIuCiQeAeYiAgCmyfmsdaUEiwMYsI22ITlSeKavIULiZDvc6 +JUQfDgfsmmD9ZtxVhwyGuLgqnHEOxXv4Cti511/iYbwM1NMB1tvmuDpjZAwpQMpl +/Fq4N/cNCe91/sAF/a3VXMZlxXey1kGmXLlCPFdzGGMGelI0v8cbB+dJ11NnZDC7 +EZPknj4jiEHVkN7/jl+WVk4zhfr8l85xh5Q+nP8/C8MCggEAPNUkA3S5WC2w8Qur +oorZ16LO7VAoMeVANjHsNz4uNTz48nllxFAFUmmFupH77s23ITqUyPDBXrlti3Nv +BgQ3+i0HNOx5Oty6KioM1v30Gg2zwczPS5FHZWNQA9sSY781W85s43UJ7ypDjqQj +hmRwBnz99uB8AmCB6VwFsB/ehGaE9lLv9PLcQmdhr+C1uylWEd0DWxthRZPMfzcV +JYvW0lNQTQUZSUifDHYvGRmmXApNIk7IO1n006zUDpjSqx4RaAmSPnoaATnhlkms +6e+joraaQWnXD+FeM6WiGHjpB4+4+A5ADDmGPQeeKvcQrLg3ltuw8TwP1sIcjN0Q +76izYwKCAQEAlYaQPCN3pTeelrhs+oZfQZYKjvb0oxc9pF6zbEH9ycD7cUDC0kIc +l2jcSore6t9VoKeYbm+iO4esX2gjo6J6SI+XvHW85ygMgtNdhlgH6D/JWgQGnbX2 +2xyAP71WLReiv/n9mMsulYkRjgRZU2eg9bvkzKqbwTyBDEj1HmFk9AqCGRS8MUfo +NGNOmFuuq4gx8tyGVHQU7xq4mYhLqOPAWeuei29oyiEv3rhKN3npxwMTVpbrj7A2 
+Q/9pZrSwurnFKs1zxaOnGxo5VdPHMMRqptB58nrhg6N2HcloLrvdYmcefOOPPY64 +XqUrAD+IaILk9nTmIhXM2UFytB6P3XVNywKCAQBsOZZ7Bk3LEZHpqqlqOy7U9jjI +39tir93AEIf46i2Rn0YgynuTpsh458E4LEH88ZXJCDdfOtFPTEqa0wnm1DHhpai2 +qyiNeXWFpmhbsEgLR4RfiASVyl1W4febZT+JpcVkYtkMwro6u6Owy8L34SO+rPCb +X2IyPqQ1+lj/9ZvXU9AGaFgZNQ9bui3sK3ifvNYLGbPTBM939hNdOoI5mW44eEHQ +ZDBKjiMNnkWlNnFJk2DEyGVIQak7pVgSygX+RkMAP/OjDPNO3DyS81WXZKuxOlda +rWV0liu6hYAAz4Bq881oXzTviG24BgUjNJVq0qYtIzrsbW7fDYirkn5ap/7k -----END RSA PRIVATE KEY----- diff --git a/testdata/x509/client_ca_cert.pem b/testdata/x509/client_ca_cert.pem index ad6fea8ecfeb..026a4e478412 100644 --- a/testdata/x509/client_ca_cert.pem +++ b/testdata/x509/client_ca_cert.pem @@ -1,34 +1,35 @@ -----BEGIN CERTIFICATE----- -MIIF6jCCA9KgAwIBAgIJAKa2/29Hc+P6MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD -MRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTAeFw0yMDA4MDQwMTU5NTdaFw0zMDA4 -MDIwMTU5NTdaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD -U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LWNsaWVudF9jYTCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL9LRGKPAszSvRzSKwgP6niaPy0w -wbSILjrp60WVHB9jjOSIvgCaTev9Tz/+zCaxCqM/hIrBNXI+ITZuzNUBx3+rz4Ns -VdYVhEsilc5gjl/dqsvD/FJdRKHKDrSzvKznwEs7KpGX1AdYoWBYZ8jNaQcDdopU -VhZdE/196akrTRejZQhnjNaaCXKCjrubfeFGpZ4hTsDHLjzuTYkiZ7m5q0Kdiri0 -9gKNdp6b5edyLuuMimEviEsZbYritZbwP1kwGiOMSQi2tzBGUcIANugqxMhSUrgy -JQ45Eew8mLnNqEOgk3nuWf4m0LPzTlJ/R70TmLIVyJrZ51GcLYmTZ/czsfkhXaPT -sTuBRgqFhJNb2ukjq8XPJH7O0wOhbUKT7MCRXSlFttUCIZ8aOmufv5mYLuaGx0sd -8uJEEMZHKDeMZOZNsyTZNaged77Onf+AoUkSH25aTdjU+bpUn/0CO2aJDqwp04Rq -7qOrtGQ76miNnw4Fe/eHJuUoqp8VH4dUmFO3vZ24N+kSzF5LDwEbgyybQN/cot0i -rjm8iqcimwS+BISEm7UvIeK0AEzXmxNC1mXEwvY0lkIci6TpH2Fy7OGaCu5MTru0 -XrOORWqxMLo65bTQ0ciUSxw8DartL4xobOW2UY+EUO6Da8yhVRbO59cC8dBbA9J4 -fH60efPhziFt4aKvAgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUaGCg8RtquvpSIbS9Va1yqdqyXuYwgYAGA1UdIwR5MHeAFGhgoPEbarr6UiG0 -vVWtcqnasl7moVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV 
-BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1jbGllbnRfY2GC -CQCmtv9vR3Pj+jAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBAEmD -XMO4s+WP86pM35pFMbrz3qsjiB6Cw3tqSoUaw0eIi88uWk3ismREV/24wY4j+10+ -5NACcPLykZLh0XpQjHMIZu4FEZSsQP0ExnHluaS/XaFf8hIy/qLFcm5x6wZ08AeU -M+daf9BmCSrjuW7u2bMxIrRLcnLMQG1kX3t3aEQLl/GA62g6Ll3MlHBGDILdvdNA -jIscctNhnrCPLBc+ykifa5NIBhz1PWU1RTr9JyNJwLaO2To9LJcpZKda2LJJ6xYQ -/lzPBg0aJgw9rOOgdenhb4ijQ5nMWZqCDZZFiKej3e6pj+M9E4a6OlelHiRPZT7j -q0bSoDDNTCviGlap/LDCBTvzyU/c8hgJ2XSUMfOL5RTXQTmqF7eQEMepmNl+J9HT -FYv80eOtk3O6rnIVHJ25zjLcLTD8iDzH3eX61bhMphI65jr4ltC6fGetXn9xINX4 -lpuxpMg5sRIYLl6lUdBcp1pMdsjEWUdiPcAxhjYqthb9MeSgmAG0cEJ+EbgGbiJA -m2DpQ8HkQjd5gc2mCs1X5HKiFWr3ERTeQwzBwUZmNaupfgbDWpKi8xrz91r3tLVN -eFjyd2z+0VtM82KP8D34ZVqssjp3jS8N9H1h3NoPqZPtFN3DjXfFV7BsfrcGR9CN -mwNfZlxB487I+gXYIwAG2Tp1UYNQ1JDDfkF39Uu5 +MIIGAjCCA+qgAwIBAgIUZzkKhtgm6Y3RaksChHMIJFKV+U4wDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3QtY2xpZW50X2NhMB4XDTIxMTIyMzE4 +NDI1MFoXDTMxMTIyMTE4NDI1MFowUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNB +MQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3QtY2xp +ZW50X2NhMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA1Rk7zsuwXn8r +KMHk+gmvaftFmlY+NHs1mKJPzyCGFnablnJtHU4hDpSvvNitoQ0OcurOo9V9ALlA +U2uw/1q6Yhg1Am4cXwSWHG0/GwCQAdPTVb7W1MiAd/IB5bx9xrwfjrpGLjVLS3/y +nOKP+kl1bf6WAcLEPClvH+kSG8xMwvg58ot7ipWQcWBTSuZLaz89d2yfxpvtwrvS +YDemY6f8Tkxil+kDjb2Jo/zdRDz8eIEOs1PcdztrdWWeQaYJVX6aEOHCfdVNOHw3 +jNQKyVREUgXjr/pkwo9fTnZjQdBUhZIo7NuPPG25t5qZK3dUDuLcVRQ5Vt0/45pZ +/HkZDCkxmSynZWz2gPClOHVPOG8Eqi0Mbd3XxQSsd1Go667oFotLvTuynbYhdh4s +xAJWXbFV26HgDXI5wXueXrs1n0stUlbD6KahfeoYBu+idX7gB4RftqhqlbIazu3y +hj22k8cMQEPkLhzmUwRt64juLA0+FRG0Hfr8vdZD+f91Qbv86Qw3c1/lckQIOlyI +MerljNbCbHJm9KOZGf1zizwvMVtVzuVtr6RY+Loov4gzhJ5kNSk/YDMQC42c2Yhz +Lr5y9EGe/cL8QXdKfjKNeJjCbzxTTFiVBq5XRKUgjz6ga+F7KGO7ayMBrexZ7+ap +z7ydlUYS+xp43hqdisAGmUMJdDVlHCMCAwEAAaOB0zCB0DAPBgNVHRMBAf8EBTAD 
+AQH/MB0GA1UdDgQWBBTq92tDG5TfVvTqbu1bA593K6aAwjCBjQYDVR0jBIGFMIGC +gBTq92tDG5TfVvTqbu1bA593K6aAwqFUpFIwUDELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRl +c3QtY2xpZW50X2NhghRnOQqG2CbpjdFqSwKEcwgkUpX5TjAOBgNVHQ8BAf8EBAMC +AgQwDQYJKoZIhvcNAQELBQADggIBAMHOXRUUq5vf9G2NvnAR1lb0fTKx4/6B9rhU +Nli9uIoWGQyMu8icEMistUp4AdHWdhutKX9NS0Fe3e5ef6qIYCng0gVBE3fTHJd4 +V8MhGtyaK0K/gpTrJdClwK/litRIEjCFwNYEK8vtuqNjR82d8IuFjnbinb+IGCH0 +sLRGvvZch+dwM5N9BVRq20M2FZhyI+fWZmt1ZiBwnfy3xM+enD2I+/LOUFoxAmGS +m2vnS+ULhq7fLaK6vgyUIGqRDQMxYEql9QGzRIspV9vVhRuOCmowlJbgCv++eOUG +FvjlAPlQRGJ+ShpXO5n2pEkdjIJOrLf4kyviLDHffIl5I80fRWzv7GJ1HP+Bb9qO +LZGaiO3SelPhvJGTSV5uSZpgkFsBbgdbbGI60W2QQIHEwG0HdjnNk17+TmVEUoCj +rWK/Kzw5py1Egtibju4CiJ8uIKeew+2pfdnnyHoCVwCfdACc4dwRpet6fQvkRcru +5PR5MzZqUI2+bjg/hJrHj7SVpxpjcr3OZdh05T+heCVuPp+9mHBmcxbeA8rkMZAq +vILLwgwEriSbKy9Y1GLs2oaPNaWEpN9Q6kZPUwtwlzjHG3OOtldeXPpMVpg6Sb0y +3NnRfvfV/g2gm68S21j6qhGM2aeQCdCu5insqnR8GS5/stmuyCNnlst24JBneE0i +louEQ0EV -----END CERTIFICATE----- diff --git a/testdata/x509/client_ca_key.pem b/testdata/x509/client_ca_key.pem index 3fcfdb7074e1..750e20bb0f2b 100644 --- a/testdata/x509/client_ca_key.pem +++ b/testdata/x509/client_ca_key.pem @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC/S0RijwLM0r0c -0isID+p4mj8tMMG0iC466etFlRwfY4zkiL4Amk3r/U8//swmsQqjP4SKwTVyPiE2 -bszVAcd/q8+DbFXWFYRLIpXOYI5f3arLw/xSXUShyg60s7ys58BLOyqRl9QHWKFg -WGfIzWkHA3aKVFYWXRP9fempK00Xo2UIZ4zWmglygo67m33hRqWeIU7Axy487k2J -Ime5uatCnYq4tPYCjXaem+Xnci7rjIphL4hLGW2K4rWW8D9ZMBojjEkItrcwRlHC -ADboKsTIUlK4MiUOORHsPJi5zahDoJN57ln+JtCz805Sf0e9E5iyFcia2edRnC2J -k2f3M7H5IV2j07E7gUYKhYSTW9rpI6vFzyR+ztMDoW1Ck+zAkV0pRbbVAiGfGjpr -n7+ZmC7mhsdLHfLiRBDGRyg3jGTmTbMk2TWoHne+zp3/gKFJEh9uWk3Y1Pm6VJ/9 -AjtmiQ6sKdOEau6jq7RkO+pojZ8OBXv3hyblKKqfFR+HVJhTt72duDfpEsxeSw8B -G4Msm0Df3KLdIq45vIqnIpsEvgSEhJu1LyHitABM15sTQtZlxML2NJZCHIuk6R9h -cuzhmgruTE67tF6zjkVqsTC6OuW00NHIlEscPA2q7S+MaGzltlGPhFDug2vMoVUW 
-zufXAvHQWwPSeHx+tHnz4c4hbeGirwIDAQABAoICAQC9otcLQazL8kpprOvd1TFj -F75zhTcySiJSYxzKYTR85YqB8BEztcRzoy2SSnyGCtJ53Xj+uOTL+U2hkZvbuiTU -qzVPmvFJBxGcDpAmBFCANtafpA2adT2Zih6kAt6TJjfaHLBpnvMhyTpJsbpJNWDe -BA/auBqTlvg/PziJbRTCz0dUWpsjD5c3/reSwmW7EvcSWQCiWZK78p3IyeO8GZTu -uBESZMrQ4v5p5DC5Ddf3yN5R0/YwROf0XCUamdajCu2Ouf6Y9dGKuNtKED5eUC++ -SuYYFhXoEKl04OmioH8jc6dfo+tw6XfSPOwzGly60xd3y+KPqF8J52K5VPkm9geC -NEttAEKEpwLX4cAsxzQ09WaL0fq+XSpwWZYuAJI4F8zPadckbzittkAFGnwH6t5N -ydaYoAcGxz9x97qbu2iS9SiN1cWQ+OSMF+o3o02WcLNcIBOVIKivV1FuLgQEPfXw -bi9egAOUI5TUvoVO8mG3Drk5+Ii6PPxEaCKfp6x0xXA+t8JrmOCsEoYRiPhCc65B -gHZC1+mgngYUs6PYmkPgTgBfYAe2wYpn7uaCEo06tNfe0kPqLzr2uMEKZNY1IfoM -5RMxic9qKac3Qp2Lf/XG/90L/wO+kVpv/HSWh8JAZXezYD9f+EhrDuYae8KlsKXE -Z+XGmMdgIarHLGnXoAqpeQKCAQEA3002LSywsvGM1KZz5tQoIyaor2kD8tW8vyS8 -7TlozM1TI58ALtDyV/LCrvS5jJEIbsdlrrOeBhQOS3RPjSQQdEQKfSly1TF9mhE2 -vDLznxFOQNpdkkzGwfLxI/5mMbeHN6960XAcfVD5QTDdpKPY+74uQU3HzQCx6Net -+UK3aT6CeIvgWn0xNnR5Fk2EnQHKUduqm0sRj5c2S8qUO6HxD/VPNRCT7G+faex9 -tP2VIHxwF4iH1WOmwQWxTLpy9wR7UYYxpFBvQN6gglHuMeY6tublTnvfhpMdZ5NU -Zd1Trzrh4w4sXRWStHkphJK9aQzHEclZq5ktvdJtFd/GGZfsrQKCAQEA2040LYoT -JcassmJs9UzgJeVkJIB61wmBc/qvqwKqy28niLubSYNaETO6cQzPrnlZjk6LKa+M -HumrlA3DjobbkmA1YI/OAhIHjGMEtaxsOTUz0rMR5RDOvRc3hXo2qKsXfDQGUtr+ -1DrCmnI/iVm8+F8HtV9tEHzrGEaCmeMLHQWCxveNoGDnZRCZds52ApoFxiLnVq3N -+ocQEsWwdOg+8ZdyfF7RqzW2e22WoCkTJYApGutDfu1eXHXlOeBrBNPiHMzK3pbA -n6+oqcxB23NRttNeUkge3UezjfQfuGqR7CLi0yF2L236MGBOGuXo4bGaUMgEz277 -ZBT7YfWhZpn8SwKCAQADqM5Ee0ECDbdTHM81bzChMtb82Om5pwsKzt1Rvekbwhmk -scxc+AugqVfLajNIPHA48IeYD1V9oAKD9gn/tCGY5iyN1IoPOFpolfOhrewUJUJ1 -CZ8S8LMpJoQRJPAjzHAo13VZzU6KNzN+gACB3DWIGpvDcjTeBS7lM/Oj7BX5YY7d -zt0EXpzZ2ZrKZMbRk9/u63ymQtqs0buQDmfTelnq+wgrRHRIIaQpJjkBKE6zU5a6 -rAAd3R40d5VqPnv31Fj5Awv5N2A7XeqfeBxBMRaxPKNxX9JP8EVBF0cAzFm8u2hM -QkUz2VCoKHwnsgfsmssAXZ5ck4wOWk5zV1F1xemZAoIBAAR+esVAIhpREvLo33C7 -bZB5Pe8djubfM/7rcTQg7t0SXw4HQixke7EEjVqJt6vMotAuvd1R0p5DjZeQHKTM -EK3UOOPMrp0OP4dZ9BvA98rIU1KLBt/Z01K+qg2bLomQT//klQiXokc5GQnPM4we 
-AahZUjAeT37aAHtT3pNGutCSb1aidg2GTtecWni7zGFLRLkFuBXno+PxZpvr3yzW -IYwT3W29B7Dpfd7TpRWNIe5PzQfXMF/mf1uHsvXXqnnD2ctbSwD6t+HN2Lf6DpNv -ron/lNw8zB0evgg3q3q8/FaJdHp9Ig3gxBK/tnoIohgV6qKjJq4ViSNI5sngHbmb -iDcCggEBAJ+Wg1Y3UnPShQjAAUyOeqfdLnb0h6ocz5Flog4I49023ro102xav/Rr -O6NzaH8nBHt4OKYWPgwa1ANZ1ujXfnqU531OlB7p8vllDcECSR9qnSE0vMO8hvbU -flREfjy2inQ9kVwCqLbYHh2XEYZ7sEwQ7p0dz1v9G1ytBslwyeC3h2aMIg4utT/k -73y0T5Nq0e6Mas5w0ZBemzKNHoKw7N05g2rrELL4hRfkGMrEIsSaANPDRM+4cI1k -a3CAv0mex+5XeBskUCtvU+xrCH6isDovDhCT/CSAjuEatezby6tLk8PeaH0uEaxr -MhPlrQvyfY9eITe9uSQtiTQRg+Z4U5E= +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDVGTvOy7Befyso +weT6Ca9p+0WaVj40ezWYok/PIIYWdpuWcm0dTiEOlK+82K2hDQ5y6s6j1X0AuUBT +a7D/WrpiGDUCbhxfBJYcbT8bAJAB09NVvtbUyIB38gHlvH3GvB+OukYuNUtLf/Kc +4o/6SXVt/pYBwsQ8KW8f6RIbzEzC+Dnyi3uKlZBxYFNK5ktrPz13bJ/Gm+3Cu9Jg +N6Zjp/xOTGKX6QONvYmj/N1EPPx4gQ6zU9x3O2t1ZZ5BpglVfpoQ4cJ91U04fDeM +1ArJVERSBeOv+mTCj19OdmNB0FSFkijs2488bbm3mpkrd1QO4txVFDlW3T/jmln8 +eRkMKTGZLKdlbPaA8KU4dU84bwSqLQxt3dfFBKx3UajrrugWi0u9O7KdtiF2HizE +AlZdsVXboeANcjnBe55euzWfSy1SVsPopqF96hgG76J1fuAHhF+2qGqVshrO7fKG +PbaTxwxAQ+QuHOZTBG3riO4sDT4VEbQd+vy91kP5/3VBu/zpDDdzX+VyRAg6XIgx +6uWM1sJscmb0o5kZ/XOLPC8xW1XO5W2vpFj4uii/iDOEnmQ1KT9gMxALjZzZiHMu +vnL0QZ79wvxBd0p+Mo14mMJvPFNMWJUGrldEpSCPPqBr4XsoY7trIwGt7Fnv5qnP +vJ2VRhL7GnjeGp2KwAaZQwl0NWUcIwIDAQABAoICAQCgH7bmG/4p84qdtJx3GaH6 +k/noD9fsHYzXZVds/zZiWLtuoArHk3aZezZWQ8asFqB9z1x4lSm5ynnAdVJpfmZA +4Ymrisu8xjh5ocliY9jR1radXqoU95g5CNtOIoWsOJ3J5MRpYlhyofDO3Btt6ZbY +kQ1sw0orHsNGih62Tpx7gIQicZbiOqJv3v6XcFbJfpqUS0X/uhk9U16wOADKL2cR ++qm3Fjs6XWq4k4A8D0tyzR8btu8ZlMeZTkNNdxLacCgaeVlorke5IvWm14pHYA96 +Rryg9hiSbaMi1SieQonQWFRyLkUCFj0P7pYbqC28hdEkCO9RCy0/vDLT2LbugWGn +JBdPIQqRYggGnEdRocvflx6f2Xdid9I4zrI2XWnorbypqVIdmhVivCCWK8PNqKE2 +YcRg8TRQHvyOXoR55Sodrxp6KycUc65nduGe5jsyjA9hlQ0Jfxhr4gv1LuytnVCx +Z2q2PFF/cznrSLlU8uBT9Lb2gGQXRyI/rxp6g6zwnTKLvMXsQqBrt2hzlE2vkdiz +I8EcLp99IT4CwSJAyGFdR/2ZmXg2Hy20GiGc5RisMIsXvj2gVt26XHrbb+LnYHMq 
+0+5d5QoMnTMZC+JczoDiw64vQlzJGcM1VWFDOMn9g7UALgofCQv9/nZrWLjw5hIB +FCli4JhtwjNUP5Gz2sqx4QKCAQEA/zY1Op8i0j2xaxeaysZg6midIKyvS0a+E+CJ +qfNE0qmwCEmG/T67+IvKIwXqfBGBrBntg3De4rTDxhTVL4S4Ue89WYzB/sf9J52e +6HEpBCujRJcdb8ouxSfDkpkMYXsVsVTIjckbQl731cm4qk7L8DS1GuE0oZs9I3kx +iQSzJ1+GzRotnzO6a11NU5n5N7NM+97x9z/BHFmd3fUOlUkYdpr67PVBNKRaj46k +Ifs0Og7cZNh2JCzhVOOYrf/x9DybjCJnPHLVuNMqYOHTNI/LgpFytM26+Lnmu1X3 +mcohVacygr55oZaCC0dz6CijEOpX9SL9sUZJb/tJ01Sxv/pgMQKCAQEA1cG6Oa7h +1Hl1f6Qqg9qiWNTHur6dBOw4lt/ej8vexB6y9c85WoMfXUBFpiQta9Nt+XCrC/UU +wY0EqdQir+Ydwg92ddX+1eKb7NmNLi/moUF+s0V+9uPvgGcz6xVlSMQKTgsYxZnZ +CE8ZSBTSD3dYyIadGQHaFoP4PsABzGfzYjWnQpvk4SZf055Qs0Gt6vIBlbs7R/O/ +wPajzFYf/o0mAaPAAdPpuK6C3Q1J4Gp5LKkHtzY79XFl336uXQV3AwxU29sAkmVS +/COFl772Ev55P5nV8NsEQaChoyuNHO11YQtZEyh7zTwx+R4SfnTivffVQNfusnIa +gKuj2Eoq24XgkwKCAQAeHRJY0XA1aIwnu8hLBu9mmWN4+IdSlY1WIRd9UzQau2UH +BU4FUcKySCRYz5jkfNhVK1YIPWg/Td8P32NsUPfCyzzs9Rvq6UQoyYN3n+qcEF4a +eM5DY5LzNobwJFj+o5xiqUNk34b05OnPcxb0GYoc1MtN2abxLrUfG2zJ4yEUk0P/ +rYgWke78Pi0ioTdz6Bc8XQkmCILLypNDHmhTGyXk0NKs5R+Fi6MX71fUnqSB+UDu +MVB3YkhQUO6yEVJGZGRiO6j8y/wF6/zDI8JdIF5+EJV9Wg0mziC4mCM4JU6bobfn +D3ygoXbEx/CYQztCgrRQO4m9wjJmITuL0SGMKonxAoIBAQCCelJ2S23GCK3UUB0z +hw16M8gHEbs++gJA9j4ggE1mYWbT7L4RpeBLR6Q8GfEv1EtY65E9J0iYLMAf+kGC +JXEct9uTaiC35i9PkCxBeTPKUvRH8a/ifJgBRP3IDbNZi3DO2q8wTwzPqZjBCxR+ +JFepb6INVbgN7lhl1UZDw2ApHp8OZaJ8XLQ5tHWGNh03QKn+/97buMnfu62YWSoG +c5ozfgUCGJyeAsgWrrndpqB4xmTTTOOkmqeYmPdOCLvwvGJAIZpjwj25cuVlD0ed +qH/SdtDEyKv8c1S3CSqF8dyodAjXTOrlCE1oxxZ64lZVpyYhAq3NdyD+Uccdi4hF +n57JAoIBAQDFuv2cmOl34qQz1vd+R0axxNSQEwYC9wug9WG8PEARRmk6vCIMy/AZ +XnHXZ4aV9ds9q0J4hGrx0C1vjGHSpBFR+kulI+KcIITtHLPTAgE7e1UXGATVz0+B +ES3qvzJ1eXhl7hrrFfYUdmPok7pJUhf37qqhKajcRfVtHccaB6J7v/sbAMCIXP1B +ij7EESZgM+NLwOQ/iAM2Bpuphn+gxdV2oqgorx3kLymzffhmn0oq6qfn818DH5ps +sPgi2bndSxG9jNtpCIPPC9ltMNwWxuB+3f+wd2pKIjBulJ9tb+72s/Vb4v7EOmJ/ +c/xqN5lRsGXGduw76PipTrLpy3/LkZDL -----END PRIVATE KEY----- diff --git a/testdata/x509/client_with_spiffe_cert.pem 
b/testdata/x509/client_with_spiffe_cert.pem new file mode 100644 index 000000000000..b982fcbe554f --- /dev/null +++ b/testdata/x509/client_with_spiffe_cert.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFxzCCA6+gAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx +CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV +BAMMDnRlc3QtY2xpZW50X2NhMB4XDTIzMDUyMjA1MDA1NVoXDTMzMDUxOTA1MDA1 +NVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBANXyLXGYzQFwLGwjzkeuo/y41voDH1Y9J+ee4qJU +OFuMKKXx5ai7n7dik4//J12OqJbbr416cFkKmcojwwbAdncXMV58EF82Bt8QRov0 +Vtoio/wxlyRlxDlVYwr56W+0EVP9Q+kzA/dTnMgOQYIeSix96CUQRy8XDu1YX3rk +fiUkND9xxuQw8OXi3LXguv/lilLVC/lXiXwa0RWEgMZZU2S1/lAElAG3aZuuWULG +K+PpKPuqkcptbUPCvNN1eUs9/D82aoFuqRCmpTC+7bUO+SJSggpUHcgTbXT9i6OO +9eR0ijcaQjtb0Y6ro+Cv60YOnlGC8It3KoY2SxioyqdceRUohqs4T4hjBEckzz11 +AC0Pj0Gp4NJPcOY68EjhD5rvncn76RRr3z2XZpd+2Nz+Fldxk/aaejfdgqs9lo1g +C+aP+nk9oqSpFAc+rpHsblLZehUur/FHhenn1pYWqkSJsAG0sFW4sDHATRIfva3c +HNHB5kBzruGymywBGO0xOw7+s5XzPiNnbXT5FBY1rKG7RwlqdtDh6LWJRHmEblWV +tPHNiY+rrStv0rN7Hk/YKcSXd5JiTjk3GXjO1YJJVEraEWHlxzdGy+xu/m0iJLed +pxZwuxxdZ/Q2+Ht+X9pO2DsW8BQFbddCwbooxKygwSlmHCN1gRSWqWMZY5nzsxxY +tic9AgMBAAGjgawwgakwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUyiXne0d3g9zN +gjhCdl2d9ykxIfgwDgYDVR0PAQH/BAQDAgXgMBYGA1UdJQEB/wQMMAoGCCsGAQUF +BwMCMDEGA1UdEQQqMCiGJnNwaWZmZTovL2Zvby5iYXIuY29tL2NsaWVudC93b3Jr +bG9hZC8xMB8GA1UdIwQYMBaAFOr3a0MblN9W9Opu7VsDn3crpoDCMA0GCSqGSIb3 +DQEBCwUAA4ICAQB3pt3mLXDDcReko9eTFahkNyU2zGP7CSi1RcgfP1aJDLBTjePb +JUhoY14tSpOGSliXWscXbNveW+Yk+tB411r8SJuXIkaYZM2BJQDWFzL7aLfAQSx5 +rf8tHVyKO89uBoQtgEnxZp9BFhBp/b2y5DLrZWjM6W9s21C9S9UIFjD8UwrKicNH +HGxIC6AZ6yc0x2Nsq/KW1IZ6HDueZRB4tud75wwkPVpS5fb+XqIJEBP7lgYrJjGp +aLLxV2vn1kX2/qbH31hhWVpNyPkpFsT+IbkPFLDyQoZKHbewD6M56+KBRTTENETQ +hFLgJB0HiICJ2I6cqw1UbDJMJFkcnThsuI8Wg9dxZ+OffYeZ5bnFCVIg0WUi9oMK +JDXZAqYDwBaQHyNszaYzZ5VE2Gd/K8PEDevW4RblI+vAOamIM5w1DjQHWf7n1byt 
+nGwnxt4IQ5vwlrdX3FDcEkhacHdcniX/FTpYrfOistPh+QpBAvA92DG1CbAf2nKY +yXLx+Ho7tUEBGioU4XvRHccwumfatf5z+JO/EvIi2yWd1tanl5J3o/sSs9ixJfx4 +aSuM+zAwf8EM+YGqYMCZ896+T6/r7NAg+YIDYu1K5b5QqYyPanqNqUf9VTR4oQ4v ++jdb5PkujXbjENvkAhNbUyUbQJ+IU0KHm3/sdhRPN5tuc9C+BTSQvlmKkw== +-----END CERTIFICATE----- diff --git a/testdata/x509/client_with_spiffe_key.pem b/testdata/x509/client_with_spiffe_key.pem new file mode 100644 index 000000000000..6adcdc3122c3 --- /dev/null +++ b/testdata/x509/client_with_spiffe_key.pem @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDV8i1xmM0BcCxs +I85HrqP8uNb6Ax9WPSfnnuKiVDhbjCil8eWou5+3YpOP/yddjqiW26+NenBZCpnK +I8MGwHZ3FzFefBBfNgbfEEaL9FbaIqP8MZckZcQ5VWMK+elvtBFT/UPpMwP3U5zI +DkGCHkosfeglEEcvFw7tWF965H4lJDQ/ccbkMPDl4ty14Lr/5YpS1Qv5V4l8GtEV +hIDGWVNktf5QBJQBt2mbrllCxivj6Sj7qpHKbW1DwrzTdXlLPfw/NmqBbqkQpqUw +vu21DvkiUoIKVB3IE210/YujjvXkdIo3GkI7W9GOq6Pgr+tGDp5RgvCLdyqGNksY +qMqnXHkVKIarOE+IYwRHJM89dQAtD49BqeDST3DmOvBI4Q+a753J++kUa989l2aX +ftjc/hZXcZP2mno33YKrPZaNYAvmj/p5PaKkqRQHPq6R7G5S2XoVLq/xR4Xp59aW +FqpEibABtLBVuLAxwE0SH72t3BzRweZAc67hspssARjtMTsO/rOV8z4jZ210+RQW +Nayhu0cJanbQ4ei1iUR5hG5VlbTxzYmPq60rb9Kzex5P2CnEl3eSYk45Nxl4ztWC +SVRK2hFh5cc3Rsvsbv5tIiS3nacWcLscXWf0Nvh7fl/aTtg7FvAUBW3XQsG6KMSs +oMEpZhwjdYEUlqljGWOZ87McWLYnPQIDAQABAoICAAY5tM7QZm67R9+hrxfw4f6x +ljfSLXBB+U5JFkko8DbhvjEN9+PQCda5PJf9EbUsOIWjQNl6DZjZsR3rqnog0ZGn +kB0yuPs8RDjrbVIXOwu/5EurWb2KZIpSjL4+BWflsndiMD6x6FSjDzXXDFrv7LKc +u0uQzLF3F00avDSEP5NvGUIbWnE7Z1cZIdj9ABQAJuVAI8gOnwaIdTsODv02jjGp +BgxoBbKDFsSb7yb9QzuvhizEitd8FajaGsqAaZYh6JwiRjkb8jl0z+u6MoqJNACm +q/gG+JLg1deIpS6OM2OBbKAr2G+HvXJMVklsdQkl1b+DcuJsBkW/gLHn/3WdQDyq +t9sB8XrUx3S5dy6oroj9tcrwwlpUPbx3/37BX7OEn/NlIWZojI62hGexoFaggu3O +Jg0JJYH8THlQqs9G/oXmRTQKngse2FLEIPePie9vIIvokiQtG4T6miOK+6QmxYZq +H+KPT8AQG8j7AEexo4is1mEayapmTxftIYANOLFaT82BhsUOZksa698Sz7k1Cf/o +MSFn6CocGLflMEzdBqEq0wbBkdeuKUKlXG3ztXlqElN1xFdbzkaP/Tzl1ooq3kLR +0cLBCJNrHxTo1tuW4qTn+s4GLHpM4PE4YZgMehVWtyRFBb7mrSXsESw03POvulBx 
+65vA86DtCPm/jVuC5lQBAoIBAQD8IWDHYtQnvn/za6etc0fc7eTyq1jmoS/gh33y +eHaY6IccUN2LXCxgTJYwmfy57De58Im5AcOnkgGvw2Hw2i6+A5i4tKkyCa9awW4A +M20QOnyQpF+9uiIqGzI72skpfH20SvgTstTFtgGr3UBOqTfcApL+1X4guvGnY+Cx +uHUAPzbis9G3CNOWb4iiLhUcBnTDZyB3MPM4S1U8E5JLFo86+va6gbqdBP4ac+KH +08XDk/z6ohp9p796o6IiBQyZEsVaYLCrzjSOXeFfE5Fyj2z53oGlws+/PdhXKo02 +3++zRESiLVuGbCmAN17nKwDbZu9kFfGNP2WdwhJt9Yey91I9AoIBAQDZOsXWNkyP +zoDcSrvJznMPFcwQCbMNyU7A+axXpAsxDqn16AQj5/t1PWqufjRSdC7gVUWFcQ2K +ldUHkNyGtqHVCcNpqsMZJT51NlgTOl1A3GLnmm+tAiMCcr7aySeNnlj08fW278Ek +fnxpgUqGtXjTFpArULSFdZulXNPAP85ZDBburJtdhMfiT3GyQ1iRZcXkzsUVzNU1 +nGGk0jtCodlzQKiz3/aHO63G0GAjtdPuXpzGm7nBJSgLD0GabkCdkHDFULOaraYy +t1zsCsg7tQWa4KGRDNkcJKzoz3zf1sI4g87UJceGoXdB+mfluyKtnFhqjFalFW8Y +14Yb8YYdYHkBAoIBAC1pZaED7+poqWsSjNT02pC0WHRM4GpJxfHO9aRihhnsZ8l1 +1zFunJ+Lq9F9KsPiA/d9l5C2/KKF7b/WlSFoatrWkv9RqtfUXr0d8c4fdRljL2Rt +9sCZceXbmCSnt2u9fHaouh3yK9ige5SU+Swx1lnOLOOxWFJU2Ymot6PK8Wfl+uDC +OpeZA2MpG5b6bdrqXsWDIZnWOzh8eRGlBMh5e7rH0QCutQnrCEmDbd3BCvG7Cemq +oNLZD+fq6Rzvg+FePCWXHLsVHOo3how1XhEgPCSVKwzMFdcAMKMiiuTDWM0VEreT +K9T+TktFrdY9LJ5X3+5K9YLXVFohxmf/vT1CxpECggEBAIfegeVU+xgrYl/nAoPb +9A1oZcVWO78QvYhn4YrDmRhrApVDNGu86oPPEU3otBMqhjNcQmqPZpfa1W6xBa3g +x2H3hFkwLG0q5WDsx7PnGnK6JcaUyurcXkdmu8ceb/XdJ+i0+ioc1aJc1rYq3xFY +qiTlhPECvpaHE/4fDHa/sfHyZNmN7nNU3KzJYeTMyLXQgTF2vsC+6FBq6ovrzpMD +pn224I35NDorcqrapHdRgCgk10xGFK4g7mXUegT8lr+2m0JfEqdZm403MRCWQd1O +gR35CDUwYw9+RQQs2v8qVTqB/riklKK5lV0YISoInU0XcBncg0koGd/g1gneTDNN +pwECggEBAM4sDCCPplzbyd0yXLGo9P3RYIsNFnKnIm0YGRPrevBaiux3Qhr7Whpi +eV04BJ7Q58Z2WFzPFMhdXU45y4c6jIbmikdplEW1TASgXxOTvTfhg8P8ljdLPx+R +3CvQi4BPkJ3ZtYrHLKXKF/9aseyHLlSzuNUAJ6H0YxVi0tmzCFG82SWcFOzhR2Ec +cWDptGTRt9YY+Eo5rhPYbX/s8fCcW2u9QGnRnX35F8vJOp8Q7eCONIaN6faV4Yos +1wk6WXjZfDgEdjxmrnqXrgxdup82uD4Q1agmkxAjPl/9frLtHMW87Y0OixJb/Sve +eSCMKThlBQ57WubHTi2TbFBVKph/rP0= +-----END PRIVATE KEY----- diff --git a/testdata/x509/client_with_spiffe_openssl.cnf b/testdata/x509/client_with_spiffe_openssl.cnf new file mode 100644 index 
000000000000..cf96f271d4a5 --- /dev/null +++ b/testdata/x509/client_with_spiffe_openssl.cnf @@ -0,0 +1,17 @@ +[req] +distinguished_name = req_distinguished_name +attributes = req_attributes + +[req_distinguished_name] + +[req_attributes] + +[test_client] +basicConstraints = critical,CA:FALSE +subjectKeyIdentifier = hash +keyUsage = critical,nonRepudiation,digitalSignature,keyEncipherment +extendedKeyUsage = critical,clientAuth +subjectAltName = @alt_names + +[alt_names] +URI = spiffe://foo.bar.com/client/workload/1 \ No newline at end of file diff --git a/testdata/x509/create.sh b/testdata/x509/create.sh index 5bd3c5801e96..378bd10cf24f 100755 --- a/testdata/x509/create.sh +++ b/testdata/x509/create.sh @@ -9,7 +9,8 @@ openssl req -x509 \ -out server_ca_cert.pem \ -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-server_ca/ \ -config ./openssl.cnf \ - -extensions test_ca + -extensions test_ca \ + -sha256 # Create the client CA certs. openssl req -x509 \ @@ -20,7 +21,8 @@ openssl req -x509 \ -out client_ca_cert.pem \ -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client_ca/ \ -config ./openssl.cnf \ - -extensions test_ca + -extensions test_ca \ + -sha256 # Generate two server certs. openssl genrsa -out server1_key.pem 4096 @@ -39,7 +41,8 @@ openssl x509 -req \ -set_serial 1000 \ -out server1_cert.pem \ -extfile ./openssl.cnf \ - -extensions test_server + -extensions test_server \ + -sha256 openssl verify -verbose -CAfile server_ca_cert.pem server1_cert.pem openssl genrsa -out server2_key.pem 4096 @@ -58,7 +61,8 @@ openssl x509 -req \ -set_serial 1000 \ -out server2_cert.pem \ -extfile ./openssl.cnf \ - -extensions test_server + -extensions test_server \ + -sha256 openssl verify -verbose -CAfile server_ca_cert.pem server2_cert.pem # Generate two client certs. 
@@ -78,7 +82,8 @@ openssl x509 -req \ -set_serial 1000 \ -out client1_cert.pem \ -extfile ./openssl.cnf \ - -extensions test_client + -extensions test_client \ + -sha256 openssl verify -verbose -CAfile client_ca_cert.pem client1_cert.pem openssl genrsa -out client2_key.pem 4096 @@ -97,7 +102,8 @@ openssl x509 -req \ -set_serial 1000 \ -out client2_cert.pem \ -extfile ./openssl.cnf \ - -extensions test_client + -extensions test_client \ + -sha256 openssl verify -verbose -CAfile client_ca_cert.pem client2_cert.pem # Generate a cert with SPIFFE ID. @@ -108,7 +114,8 @@ openssl req -x509 \ -nodes \ -days 3650 \ -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client1/ \ - -addext "subjectAltName = URI:spiffe://foo.bar.com/client/workload/1" + -addext "subjectAltName = URI:spiffe://foo.bar.com/client/workload/1" \ + -sha256 # Generate a cert with SPIFFE ID and another SAN URI field(which doesn't meet SPIFFE specs). openssl req -x509 \ @@ -118,6 +125,27 @@ openssl req -x509 \ -nodes \ -days 3650 \ -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client1/ \ - -addext "subjectAltName = URI:spiffe://foo.bar.com/client/workload/1, URI:https://bar.baz.com/client" + -addext "subjectAltName = URI:spiffe://foo.bar.com/client/workload/1, URI:https://bar.baz.com/client" \ + -sha256 + +# Generate a cert with SPIFFE ID using client_with_spiffe_openssl.cnf +openssl req -new \ + -key client_with_spiffe_key.pem \ + -out client_with_spiffe_csr.pem \ + -subj /C=US/ST=CA/L=SVL/O=gRPC/CN=test-client1/ \ + -config ./client_with_spiffe_openssl.cnf \ + -reqexts test_client +openssl x509 -req \ + -in client_with_spiffe_csr.pem \ + -CAkey client_ca_key.pem \ + -CA client_ca_cert.pem \ + -days 3650 \ + -set_serial 1000 \ + -out client_with_spiffe_cert.pem \ + -extfile ./client_with_spiffe_openssl.cnf \ + -extensions test_client \ + -sha256 +openssl verify -verbose -CAfile client_with_spiffe_cert.pem + # Cleanup the CSRs. 
rm *_csr.pem diff --git a/testdata/x509/multiple_uri_cert.pem b/testdata/x509/multiple_uri_cert.pem index 97637997e3dd..210b844448cf 100644 --- a/testdata/x509/multiple_uri_cert.pem +++ b/testdata/x509/multiple_uri_cert.pem @@ -1,34 +1,34 @@ -----BEGIN CERTIFICATE----- -MIIFzjCCA7agAwIBAgIUI8r7b2hX9DRwEQGWuRdk32eU5kowDQYJKoZIhvcNAQEL +MIIFzjCCA7agAwIBAgIUA0Tqj0ezdOI2R+W4smJis+1NRucwDQYJKoZIhvcNAQEL BQAwTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL -BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTAeFw0yMDEwMDcwNjQx -MTRaFw0zMDEwMDUwNjQxMTRaME4xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEM +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTAeFw0yMTEyMjMxODQy +NTRaFw0zMTEyMjExODQyNTRaME4xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEM MAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBDMRUwEwYDVQQDDAx0ZXN0LWNsaWVu -dDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCm/zjNYkfCTcq7tnVf -qkPEde1+M6s2z05iWDfoBeZfC2NwUxIBqAC6XTXTxqYSjEVRCQUzjVxyWQNiwuz7 -pK/xGZhP/Ih2uSQKTw8vkXay4HCOt9DR0S/XGcQNImdbawKgnGven8Jrg8UZDXrt -9R9Z0nRajB1eXvXOsEEoEfOnYthc6P+MxWJc0lnfaTlowyEgv84Ha13y1h46W6yC -+WNBT/kWqp/mzDTv/Ima8xcqEft9VUZ82qJ1DVt1064x8KOzm2x7F7QSIcjxr39M -fbASm8Vdnt10XfhdsDVkxTlBJs8WKGn0uw8MyPNjFG01OpYDHLAfJTL3XlvaUjfF -yDMFsRDVjfkuYIkqAVWQ7eleFfOFBYzaVf2K+2OvCR+vGAPa5NQ59kwogJYLjV/O -43axChBizcPM0p7gmRhhO7TQz7LLTea30rBJ/YtXdxFR11y9Jdq+i2KwWi8O50iO -hzxUBkbcQD/W9Bcn7gOkD/pgEGynWvFSs+UHjLeyL0COk0NiuYIMlOgwtI5BGwzD -bdLuTU/ZQm4BJBjEIGVHFqKyTqUXcw5t9fWxH8V0XNs8zqj9J7lvNKu9b88GnyaJ -fKMdDO4rVTJHmvFDHP9MUJHC9SabW8+hK0nuU7n3+Pc07ToCAan+Ych5bQHsRMjI -9EvxKVNfwIwNrmRr3mhbOU9xIQIDAQABo4GjMIGgMB0GA1UdDgQWBBS6jnt9IccJ -SOuE1KwP68VCBPB4hTAfBgNVHSMEGDAWgBS6jnt9IccJSOuE1KwP68VCBPB4hTAP +dDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnfQMbGQzzWl17NuJQ +annAeiYFX9mcyKi0ywS1BOsNpDn8SRgeW7Ymj8EqMYIUv0VK5QDpsJKUQ4ZDBE/f +drplyXhbR0/aMpAATxP4AjnzH0pw49aIe/n3KTdBZj/KF52qO7WG2rcJ5GSVr8Z6 +H8FeP02GRt2iOAbLqV5/k52gBEEzjSJ1Be1DiRAMOQL/Wahyo1XTfvB0UeG7nIFN +OFiTdQmgS0s6OUYWns8J2jcSO8XoSKCxuNz+ZzkisX7xicRFjkweLKNLuYmS4U1x 
+wd19JMizaCRoK8E6NnOGVrxP/r9am5ft3QgC6AIkzZvNcylcHkkhZo5++X5qC18u +mKjPgxCOzAop/pGeiItfkjnshccLPgoPBI1W/gO6puxDaTV3HFGZdy5rtV/6MxjE +byUf69aKaBEY9d+mIw0OB8TfyqzNSoU578rTbuEqwiG2f1IDe0KMB2xkikDXGXPz +YYRVdm4bmgVmY3fjpoiM06+aSN1IbYEnmvuB98z1nf8VyRdR39jVk80udfHHdQWu +xTBMEHBtAjT6HC0tKIkeGKB5kmozPIVL+6EbI0JyuRVB6wyCrmnpv0Mw1NzSF5NM +JiYD/5ScSKhnFogYEstq8Lgj5atyrqi703bNGDVGmbTqChBG133r30WNT9JJ4rzS +KznDJhoHOgQqagkzGgHsKeAmiwIDAQABo4GjMIGgMB0GA1UdDgQWBBRHg8AR1psw +kslOTh5a2nxGAmo33zAfBgNVHSMEGDAWgBRHg8AR1pswkslOTh5a2nxGAmo33zAP BgNVHRMBAf8EBTADAQH/ME0GA1UdEQRGMESGJnNwaWZmZTovL2Zvby5iYXIuY29t L2NsaWVudC93b3JrbG9hZC8xhhpodHRwczovL2Jhci5iYXouY29tL2NsaWVudDAN -BgkqhkiG9w0BAQsFAAOCAgEAoR4LbmvtSXLiVg7BRilvSxIWgcG6AI75/afuaM20 -PUTpyDhnrPxEaytb5LP0w42BCMoIHXDLE0Jmbxqbi+ku/Qw1R6723J7gwRSUYIg1 -a2S5Gue4AFp7aSLDUZhl0jPphq7OMKozzH5TrDgjKljYjPURClc/ODSlGdzOqlif -CbDHwrCorb+BFM3aFDE0pF06pnMDXcn/Ob9QCLIpvZEOWe/fJbPtTUiA5cY3knne -regyhvfqfVZtU52qg+9o6q5QchVqOt19alAsISK9/H4iVE+S79AiYEAU4yM4S6p5 -VW44idy3KXmr5kyVwJhe3t9f5Ckuswmo6hL32ec6M52ElrS8Er0vFt4bjfNgq996 -lTm4/reL/Anko9chQiGBe7F8J82OfxjLoVH9CbZjIoS4LiZPkey3Ze9HUV1sHhM/ -umkL54jRsVjEwwSCIcF9onzmiD8D7FV3AQ9W/RbBF3wZvVBBs9ZKQCxek2pZX/eZ -Q+BvXwG7NGArowpqbi+tSrW3O+XZzY7nXbbf23jCBwkBn3jvqn1Kwsr/T/HbXUaz -dDUvkwgyrX7NfvvZ20svtKLlBZTO5D8P9fy0+cHsS0XkPhw6UbHk396hoOmVZ+OG -E5uVb2sBy+vx+82IwVzWN0o7380AEmAA5nrA6fMaxTxmo07pOF7avAZ34LgHJIjr -sTM= +BgkqhkiG9w0BAQsFAAOCAgEAOkL6WsETiUWJT2lhMXEHGpLwu1Q4nETr4O51+V7t +AJJd7oGS/QRL0K6YNDgNQW6GOUZptvTEOSAO2irNohP+0+ITZAClF46ggB0pRAeD +COWjnG9h1aonMtVlnswh2xVYfg4jd+qfQZ07jN9tATn5ZBpFpcaxvcyAYc/eq6x/ +DKf7HBBWq9XWyRxZJuPD9qhyGPDzI/E2yr2ahLJFSGMRbTDivDUbw0yHbzmYnY2g +uPrVAAD4DuKsJxyZrA2/Hs7ZspBMTyUjWj7KSw64AcDvFDQgPBXDfG4CMSRH3Eh5 +J2F48ej7T6J1+PbJ81ISifGjUZH50haskBG5TKQqRX65p5LIVrDThsEM+YpfEyOB +mD2ylbxNs/X3b9fk07iS2HirfKZ0cKSINZPU+hEroasqxCcAY0E28Kzw0SdAGCGf +iZNRT0mNVgTPg7Bnrb7JhCBrm0aid0/nYFX+fqeKuS2lcdAcx6U5EgH0KnHg+9/N +NbSv+RtRiGWv5RqWF/Pk4bdHPvlzp/qiFfX9dQIOBtrFph9XUt/bEf6hZgaMKvT1 
+QbQuM+rmf2ghjbqpCRP9iZUYBzOOvDZ8IeugguDvyBgrGaUSpreMzMC52B0fp2jB +Ib89u6yiKNNZzBGGE0d9y2qsju7q3IoV+eUwqbCUvGvcal+gdAfhO7Pvr3dD40z+ +g58= -----END CERTIFICATE----- diff --git a/testdata/x509/multiple_uri_key.pem b/testdata/x509/multiple_uri_key.pem index c2918fdd65d0..621c1b2c9a5a 100644 --- a/testdata/x509/multiple_uri_key.pem +++ b/testdata/x509/multiple_uri_key.pem @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCm/zjNYkfCTcq7 -tnVfqkPEde1+M6s2z05iWDfoBeZfC2NwUxIBqAC6XTXTxqYSjEVRCQUzjVxyWQNi -wuz7pK/xGZhP/Ih2uSQKTw8vkXay4HCOt9DR0S/XGcQNImdbawKgnGven8Jrg8UZ -DXrt9R9Z0nRajB1eXvXOsEEoEfOnYthc6P+MxWJc0lnfaTlowyEgv84Ha13y1h46 -W6yC+WNBT/kWqp/mzDTv/Ima8xcqEft9VUZ82qJ1DVt1064x8KOzm2x7F7QSIcjx -r39MfbASm8Vdnt10XfhdsDVkxTlBJs8WKGn0uw8MyPNjFG01OpYDHLAfJTL3Xlva -UjfFyDMFsRDVjfkuYIkqAVWQ7eleFfOFBYzaVf2K+2OvCR+vGAPa5NQ59kwogJYL -jV/O43axChBizcPM0p7gmRhhO7TQz7LLTea30rBJ/YtXdxFR11y9Jdq+i2KwWi8O -50iOhzxUBkbcQD/W9Bcn7gOkD/pgEGynWvFSs+UHjLeyL0COk0NiuYIMlOgwtI5B -GwzDbdLuTU/ZQm4BJBjEIGVHFqKyTqUXcw5t9fWxH8V0XNs8zqj9J7lvNKu9b88G -nyaJfKMdDO4rVTJHmvFDHP9MUJHC9SabW8+hK0nuU7n3+Pc07ToCAan+Ych5bQHs -RMjI9EvxKVNfwIwNrmRr3mhbOU9xIQIDAQABAoICADn4UuGJAlwC4SN0XR5OXqPu -Q/kROpgWMqGU+iNDGQtZSrWNQKzugwIupSbUyIWbx9wvg2y336WaHMDF5bodGy5Y -sjTh9wUvk8E4XI8oscm6e5gvWv/a2/6RZSsiDDsB1LGoWxG256im316o/UlpU+68 -TcO46+D8mdub96JPSQOMHotyHnPheRm7s5MIVfN1+SQDMSQGM2C+z1N2y1XT+I6N -kmw54rQdoyrDwYjWZe4mu+RwG73vr4Ful5c5WjjfzhPlGi1ItyusKrMrNsd4wgxT -opmzMjDZBgSPzJkklZF2RWDtuopH/Rt1DngQeTCHG9gMt164bQ7N5JjO/alcq8j4 -TW/IRlZOllqJ0KogOn9nX2ce9Kfxz+H36Yj54sKuOOYvKRsoiTNdTD3D6eB7pwnQ -KGWAGrpU4llbzotiG5NJ8sDHYUwynmhfmwIeBjq0vuXlITLQplGYQnsQJI29Py3N -KWBOC9HaiKCKq2gAUacj8BK+BLeGEiV9sxWQb7/MbWRxXnW4KhNI8+ft+PZOuvZZ -vLxH0wg4/bYQISMaeaqWL4LksKtV7es4MglFdCCZGDMdy1/btIHjRFPQWwIaXxij -2OtCozfmmzIc76UQ8g506q4rSgzZclDvI3Yd3cm3XFl4cQfr5l8WTQ313wrlmo5U -DjYdKipOGFRSLHt7aXABAoIBAQDY4KyfuCHFMqKC0FJZUCr3/gGU0aqZqHR4l5jl 
-N0TsTuwCRf4lK9BuM1bqumv6Nbi6VwWmp3+BzZCI/Nn7+s6KN5hyulBd56X7owef -zl9yWW0n6nJxKzutiH06krjmODtO44gLjR7ddcEd+i023hwIQffdAE6IEtYuuoD4 -94pKd+dB9GQmgITwjS5vEP67A0lpFlL6pNMfhhe2QOLUnDPKsgSnKgwJsbBYC19j -TQUpgFh4iCYKSGAX4ABdKpOUjbKGqNGrZNPQv+4MS9u6s/HWN7yDaSMT/tB9n1MC -g8m7crWyOuNJ5oO6SPnetkdTbcam7tiCce/auqjx2cMJ2eDBAoIBAQDFHxPHXP6Z -FHxI2pYBFyUB7j0VipwG3105ujrJJWu2abFU778SrkmM16eaWHVH6tMvuAo24mP1 -6Qfi09uAjdwhRPmIfManxj5wpDafgvG7H5g7+VhY2/IXTahO46JuZAxVoiXUGmct -WwmOy0vpI2IxoXY8qLvaJv+b9nLpNi1PVJ743BmPMqG3dInoRAIBxMMEu6Drrbj3 -bjPmRNpqhs7/Kn4IahCalD6lgSBkDuz7DaJji8jINw5OhiL9VU1eslXmGrCbZMXv -1QG0EjAZvGzqWPL88mKYTecndP1k9DMqVBVGhCT2dW1aLypQgDCC5YxyRc5vOnZ+ -2vQpELPeS0hhAoIBABBEqi5A7aeRKMePQN4aOV7o2s2C/L0R+cqh9IIdJzpioSl6 -fpnjM3tQtpBc84SNSxIPPQlHPzVJajIcZW2VXrDXgsP4XdbtbXH2xLekD1zQgHOi -DnuWtp9JwbsHDn+WcDx2rNnQ+CO8lYPeJE4dUxT7fdBCGaHzZ8WRj+MdDm6Pl/VG -k8yfj1lL/dOu/qygjn0ng4nxmzSeJmExdNJl9SybNeYkLUr83TF9iOY1/NEkI37H -F7Nlwm+ICf7zFqbqCh43w6KLqafa/cxGVHEo1lcvTyC8Xjk9v/3sWZmysQsyi5aW -/D2q4O60Uqn2GluTvHcBK5R9X3SU099wakTu5wECggEAUljjOFu++FA4g27dT2NN -0HqoBgG7oJtbJKyJtlHtp2yL6kGlfrZUf4PvvmjJxdtxkfO+QKNewvIwmy+J+TBK -D5Py8nO9wYTtvLy9HPHk7hkKzbMilyx6/AUzFJG/34HoLTXpu6u0ApyPZ5nCAokH -klgzPq/2mfHEwnC4HHjHgOaG6st32fx61lrW6bLPa9G47pc7aHlQVf0xrTaCUBI1 -Ex+7OuSkPw9DBHzm/SXHFjHh7tgMbqehUGh04YPrKG4zuEbaFHCKx+AiMAmREo9G -qLez+rt/OMUCldcnrC7f2QT7RlQZ5OO1ZQFjGfITUft3Kp3C2XCA5AmwCh+yJGEq -wQKCAQANvxxFh6VvjU2+rB8Q4mDzYdr9OFTWMag3SNjBwwWoSXbL2wXPE5gFpzKj -yvEbjmOgzIRABt6Eytx32p0pC5UFIey5PNu+/4ejxiiQdKSLQbqQavKYdfGgyZ0/ -JVqNKiiEJ0b9VtqhAG+Ye1mHZIBzXncWyBSZtxUGVuLG29uKbBo4ufyKauPd3dDv -wR+JqEmAg0ICIFR+q81dEWY/gKsyyI5hMYTTsWge3l3FAdwMZEn9Ek0nclSb3dev -ZiVlFvMZPdp5IwZljClRxnyto7bOTw+X/RMuVLB6p+v3URY4oUSL15+RNODn/tWM -zJOG+48NgohVKfBhGN7JyxV1dq/X +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQCnfQMbGQzzWl17 +NuJQannAeiYFX9mcyKi0ywS1BOsNpDn8SRgeW7Ymj8EqMYIUv0VK5QDpsJKUQ4ZD +BE/fdrplyXhbR0/aMpAATxP4AjnzH0pw49aIe/n3KTdBZj/KF52qO7WG2rcJ5GSV 
+r8Z6H8FeP02GRt2iOAbLqV5/k52gBEEzjSJ1Be1DiRAMOQL/Wahyo1XTfvB0UeG7 +nIFNOFiTdQmgS0s6OUYWns8J2jcSO8XoSKCxuNz+ZzkisX7xicRFjkweLKNLuYmS +4U1xwd19JMizaCRoK8E6NnOGVrxP/r9am5ft3QgC6AIkzZvNcylcHkkhZo5++X5q +C18umKjPgxCOzAop/pGeiItfkjnshccLPgoPBI1W/gO6puxDaTV3HFGZdy5rtV/6 +MxjEbyUf69aKaBEY9d+mIw0OB8TfyqzNSoU578rTbuEqwiG2f1IDe0KMB2xkikDX +GXPzYYRVdm4bmgVmY3fjpoiM06+aSN1IbYEnmvuB98z1nf8VyRdR39jVk80udfHH +dQWuxTBMEHBtAjT6HC0tKIkeGKB5kmozPIVL+6EbI0JyuRVB6wyCrmnpv0Mw1NzS +F5NMJiYD/5ScSKhnFogYEstq8Lgj5atyrqi703bNGDVGmbTqChBG133r30WNT9JJ +4rzSKznDJhoHOgQqagkzGgHsKeAmiwIDAQABAoICAHkAXOUP1QZe65hfz2LPecRv +utY5KCsX4KI05eKtee9yDR5R5GXSVidHxgLon5TDlpkEFwO9uDf7DJ2QGPBVg1aU +FirDu1HlI5nFh6SuXxVhLtOeFtil0LIaibvq1fz30MUyu/OAQaqY4X4u7lI+bOHd +E/IFcouGtIogg4/hoof/aueGeDVZIc+fzwM1kQ/Pw12G2TOhyrAOk+mJqPST15I4 +hMrUerXGuPcQpnz0tMKsgk9NYSLkbmwxQNrqps5zfGPP6PgHwbWshlKiCOQ9bfnC +QGk0vNCxg7i9q/qK4SNd5Prd3AZRoD8RRLM4A+6K23+ctbK2uA3Ny+Fq88njKlkN +jYxHPlkZ+b6nGzYxwZ4pbVYR/rpmrKdnrns1t4l/9GCOwMhDZe1jnRZaimQOoQQs +8hHMwxDisqsOjzd5ozQU3dVgxmG/n0jGjjtBIVp8usGe//AqRmZ7SVRNrglgG6FI +vqYxwCvum+DEJ4X5ONDyyKddmccGkCpj1lX6xPBtEkp7VupKx4KHW6ufteQYSkdh +U80RrCPaIKoFm1y5Jes9vOtICtvRk4PVNfLXBBycn0WR6aUQEkFHDvSHFlCnROPD +UdABH1r3bSMruz5vQrdIA/6XGHXzjHmq1WN0pKynwberFwazAxDlD/1G1ZbPD4Dg ++Z9cpyZ7Tlxj+T4hSfExAoIBAQDT8GU76CpiHL1VkIA9Khk3j6Nglq8ALQjRUH2S +ReqcAhvpUiSr6G9lt9PWGm5iWv4qpRoln+HyBDXpWOxaTR/PNi07J0NeTGxk2+in +IYNMDg9IILDGlbv5tKen4yKD9Yb7VHD73DxhDbSs2U+o49eTdfXmlQfErd15alZy +m19Xl2r2jPtYF8diPEp2m2dUamArnA8Vd3jdFAxTsfP8eNeAcCn6R3jEfIFyMm02 +X2H4iGO4Ec1ykVdiFxiemElLjm3Z7vVek88Md0KgjuAxhv5AuogRYtgMIMlhNhco +hfNgXzVtvxuDFJ3J2rlA20T0htSz6xsy9ZVUYRWkDIf0tPRJAoIBAQDKTufkXmcC +wXhkjnUMmYXqnmul6CPeYbXyfX1CZhtAlZSXtzAJ/2e7eRlqb7iFJuYfTdpkbN6i +pMrSb5wfcPtl8RRC+MERMCbe/LB4DND70QSLy/u8mWIsjbkdIT6SCi8rlNzfaL7U +cOb4uzmuXOhZHvdw8q9YTjXlT/EjKoMa2a+RggVLnICTm5sG6tsfkgiH5+DSsFCL +kKQt9Gtc66Q8HfyJW4ljK2JAOjYR7w3+bDXsodrxUQrl5maMALeBdkenc9uzwsmE +2an/R0NQToJJku+gRKVbJxN9jyMEVMygmygxfmW5qUyFZ8W2dd1Rkcr9jzNdVPlT 
+a6KgEGHPP5wzAoIBAQCqma3DpUTIqT227JRtp7+Yu+TVUTYZPCcG5nXOEr2hSwlQ +rTCbuIRDKtA4XhpQzdIeXbxIYQStnboP1eabYc2jLIcIQLi35WizX1lNf2qDBCZE +9xuVHt6rSEJUoD8eXbuEABraghOQREoVgO/gkVbsel2weHJCXXoTzAc+RddfWKFf +SWjhJnL2nnWKN9nbV62GLR7vNrZxrzuk+2/c4SEHYEJKFtIdx+MjG3hR9kGUn6U1 +fA8Wk+v1J4ZH02ncig/fB703nl9iN3XIbHoHJBTx4bS52gjy6klwGOxXUEvyXXFS +oCzzPNsuqwPIMzi0ZPw+v5erU4ga3fNflD60Oh0RAoIBAQCXV84MPj7rhdZNy3Bu +246eBKNdOrtSimA1poEFIiNy/jNqB+WNJR7x1VcZE7jDC2WNt40QIY2vuH3uTQZL +UxcOnPneW/76n74EhJ5zQIs6RpQTDKcm4Mvbrq3zx8HqOGovPS66hr5zaH6xRkaR +VPmQaiULvtFDy0ZwZIxtFUl81aqMvOq/NLXPNtITq6/+/x0YpnO+yZ2Hus3Hfxiu +K63yNzCLhQnTQUo/6Aw5AE/ErCju++oxKsJvWBwQ0hx1YgmakIakBK0CkF6nFSWb +NxAqgBx5FcFp3mKrRGAaxmFKKKg51me9K5SOHCKBK81ETz++zdjMElxudo/zFC5H +fzuXAoIBAQDTFfoLQ2XC0PI5x77zpse3zOvIEvsh3e3tLUhOHLoLTUqX0Nvp/7m7 +ohTHrPU4lf4ERInL4Kz7y2iI2yjRiKD+ARYHDYVx3QgttNeUFnsODnseJTnBcgFB +EWlThVuxbvEzJAZXNVtKTHmCLKFHqc9epyfdI59uD5lXQjvNp4uNEPiEE1AX0d8d +0OuFfQ6bSK1rZNv6IkRPTl+LCWuyHvZNWO1WZ4IUCg4XLr1IXtQaRIMRDxyxbHwc +6vwk0JtRxCt6wvIb/YWUK6qjjbfcfH70iDComZRTyXLGrr8w+vtmqvD43MUGIEmq +XHot6Ki4FhjC4ks5oT9m2q6TerUyIg8Z -----END PRIVATE KEY----- diff --git a/testdata/x509/server1_cert.pem b/testdata/x509/server1_cert.pem index 3e48a52fd108..ed6bc02c4ce9 100644 --- a/testdata/x509/server1_cert.pem +++ b/testdata/x509/server1_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- -MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwUDELMAkGA1UEBhMCVVMx +MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIwMDgwNDAxNTk1OFoXDTMwMDgwMjAxNTk1 -OFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIxMTIyMzE4NDI1MFoXDTMxMTIyMTE4NDI1 +MFowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3Qtc2VydmVyMTCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBAKonkszKvSg1IUvpfW3PAeDPLgLrXboOWJCXv3RD 
-5q6vf29+IBCaljSJmU6T7SplokUML5ZkY6adjX6awG+LH3tOMg9zvXpHuSPRpFUk -2oLFtaWuzJ+NC5HIM0wWDvdZ6KQsiPFbNxk2Rhkk+QKsiiptZy2yf/AbDY0sVieZ -BJZJ+os+BdFIk7+XUgDutPdSAutTANhrGycYa4iYAfDGQApz3sndSSsM2KVc0w5F -gW6w2UBC4ggc1ZaWdbVtkYo+0dCsrl1J7WUNsz8v8mjGsvm9eFuJjKFBiDhCF+xg -4Xzu1Wz7zV97994la/xMImQR4QDdky9IgKcJMVUGua6U0GE5lmt2wnd3aAI228Vm -6SnK7kKvnD8vRUyM9ByeRoMlrAuYb0AjnVBr/MTFbOaii6w2v3RjU0j6YFzp8+67 -ihOW9nkb1ayqSXD3T4QUD0p75Ne7/zz1r2amIh9pmSJlugLexVDpb86vXg9RnXjb -Zn2HTEkXsL5eHUIlQzuhK+gdmj+MLGf/Yzp3fdaJsA0cJfMjj5Ubb2gR4VwzrHy9 -AD2Kjjzs06pTtpULChwpr9IBTLEsZfw/4uW4II4pfe6Rwn4bGHFifjx0+3svlsSo -jdHcXEMHvdRPhWGUZ0rne+IK6Qxgb3OMZu7a04vV0RqvgovxM6hre3e0UzBJG45Y -qlQjAgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFFL5HUzehgKNfgdz -4nuw5fru5OTPMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh -bXBsZS5jb20wDQYJKoZIhvcNAQEFBQADggIBAHMPYTF4StfSx9869EoitlEi7Oz2 -YTOForDbsY9i0VnIamhIi9CpjekAGLo8SVojeAk7UV3ayiu0hEMAHJWbicgWTwWM -JvZWWfrIk/2WYyBWWTa711DuW26cvtbSebFzXsovNeTqMICiTeYbvOAK826UdH/o -OqNiHL+UO5xR1Xmqa2hKmLSl5J1n+zgm94l6SROzc9c5YDzn03U+8dlhoyXCwlTv -JRprOD+lupccxcKj5Tfh9/G6PjKsgxW+DZ+rvQV5f/l7c4m/bBrgS8tru4t2Xip0 -NhQW4qHnL0wXdTjaOG/1liLppjcp7SsP+vKF4shUvp+P8NQuAswBp/QtqUse5EYl -EUARWrjEpV4OHSKThkMackMg5E32keiOvQE6iICxtU+m2V+C3xXM3G2cGlDDx5Ob -tan0c9fZXoygrN2mc94GPogfwFGxwivajvvJIs/bsB3RkcIuLbi2UB76Wwoq+ZvH -15xxNZI1rpaDhjEuqwbSGPMPVpFtF5VERgYQ9LaDgj7yorwSQ1YLY8R1y0vSiAR2 -2YeOaBH1ZLPF9v9os1iK4TIC8XQfPv7ll2WdDwfbe2ux5GVbDBD4bPhP9s3F4a+f -oPhikWsUY4eN5CfS76x6xL0L60TL1AlWLlwuubTxpvNhv3GSyxjfunjcGiXDml20 -6S80qO4hepxzzjol +AQEBBQADggIPADCCAgoCggIBAKtk472NGPcQhDL9U6wsYWGOachAw5XX/a7lUBh/ +yowV+qD/SRCbspeBfiNdMNoXh/LPgyePWhAhskT2XaSJZ5cYD6VpI9Q55lFnFzR7 +Q1bw7BLaD2q83BJkrUSGyDnxH+LdQc2+Gq7rj1PIpIDBaJDtdd8U9bcpP6rH+S9Q +yGQw5OniPCCsUrnx8ym/3lAhKdn2OWXLq+F1avim8AN5dQj0fvAI2kMQdswKgY2M +bs5E932WPtjwLbe80A7RtHPIrqvsdVIoaZav7g3liKekisBuJGLMtTX3hBct5an2 +eKu3Q901bEQXeMWrToekc+DnUsmQ5TwkXatWiE+/sMWg80KNyWt8rulI4ATF8go8 +7Jl8duyb/jvULXjTRdDae6w34gNZjq9jZH2qSVIiLV3Jy0GadyRVJDVyE8Lz8EiI 
+XkbhgbjL8fpNG8cjN+58sK3TNDuP480A/Pi/9I1BoPYTSPCD6H1KPQJwF2GZVmgj +epF328/RGjl0bfaY458RRYZafydblBpIhDsLBBRmDMkFh4SghAgOwoQBjsEZpCmF +efzzPmJybfloBdmBiqfrEXP8t3J4jBzP5+qhYZRxHik0ignOWwyDtQQSUa2JTJoE +/ET8bkO88XLL7hkAlF+eLVV8ao4oXRh5yjf1c4PvJ/Zfr80mYJYOvOlA8Me9/+A7 +jZr3AgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFJmANOTd7MASs5/K +mnBYcpvzmgLcMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh +bXBsZS5jb20wDQYJKoZIhvcNAQELBQADggIBAFH3XxzcRkP5053jgcyV1/L3hNY3 +gAKxdDjYSw9uk2saJXz4gjfvc8xyIsWv35QU2yzzvN3xaGfDP+qVh1fNmC/7DdUF +LJzYTJb/wm4atVM6oYzdBhu/b3NqtMPb06NFKqyEX4SSN2QjeUVUF1QgAkjmsCiE +79NOuoCO0aWcxgdKd18Wl9MLtG/PtlCMLRcPlx4FX6OYLcOeqFEtcOQXKYDWej0r +9m8JQ2DAGRSa3AOUYskN12GacEmchC86cnlMsL2AycnX1YzOBawgp4KKSur40iPg +S1+8LRjZA5Dz88+a+DXb619ckABO7v8b0AVlbkVVmaXNnhBEU4vqdfQa4BygaGGl +BG8hYYMoNBHpNDr8bBWwwl/WVpGUIMHpOnIJnG5gAGrNiAxH1/6DYFC+cXEOH5J2 +NpkJ5O3Jm9a1xtwBs3tSp8GEORqVrpIjiK+bUWQacss9nsyyO6Oo0S33javSR8AN +nJrGHBE01/QytEpJ53d3N0btZrByhiFZkh8BG4NdhXZsAaQjVy6EEHxfJBsfl8Z6 +UGX4T/TTkASNDWA4B+/nRD/BxrcSegDb8fE34GY9M0IWgQtmMIdR49bOxygzYMFK +lrh+dwGqZ9/xqJ3ro7sYphhJ+Gk5YL5lkZygF2/F9GJY3zOcKrFUalfJgaqKOaTO +6zW3ZjSyDhQoFNR1 -----END CERTIFICATE----- diff --git a/testdata/x509/server1_key.pem b/testdata/x509/server1_key.pem index e71ad0ac9753..5e5331ab5d98 100644 --- a/testdata/x509/server1_key.pem +++ b/testdata/x509/server1_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAqieSzMq9KDUhS+l9bc8B4M8uAutdug5YkJe/dEPmrq9/b34g -EJqWNImZTpPtKmWiRQwvlmRjpp2NfprAb4sfe04yD3O9eke5I9GkVSTagsW1pa7M -n40LkcgzTBYO91nopCyI8Vs3GTZGGST5AqyKKm1nLbJ/8BsNjSxWJ5kElkn6iz4F -0UiTv5dSAO6091IC61MA2GsbJxhriJgB8MZACnPeyd1JKwzYpVzTDkWBbrDZQELi -CBzVlpZ1tW2Rij7R0KyuXUntZQ2zPy/yaMay+b14W4mMoUGIOEIX7GDhfO7VbPvN -X3v33iVr/EwiZBHhAN2TL0iApwkxVQa5rpTQYTmWa3bCd3doAjbbxWbpKcruQq+c -Py9FTIz0HJ5GgyWsC5hvQCOdUGv8xMVs5qKLrDa/dGNTSPpgXOnz7ruKE5b2eRvV -rKpJcPdPhBQPSnvk17v/PPWvZqYiH2mZImW6At7FUOlvzq9eD1GdeNtmfYdMSRew -vl4dQiVDO6Er6B2aP4wsZ/9jOnd91omwDRwl8yOPlRtvaBHhXDOsfL0APYqOPOzT 
-qlO2lQsKHCmv0gFMsSxl/D/i5bggjil97pHCfhsYcWJ+PHT7ey+WxKiN0dxcQwe9 -1E+FYZRnSud74grpDGBvc4xm7trTi9XRGq+Ci/EzqGt7d7RTMEkbjliqVCMCAwEA -AQKCAgEAjU6UEVMFSBDnd/2OVtUlQCeOlIoWql8jmeEL9Gg3eTbx5AugYWmf+D2V -fbZHrX/+BM2b74+rWkFZspyd14R4PpSv6jk6UASkcmS1zqfud8/tjIzgDli6FPVn -9HYVM8IM+9qoV5hi56M1D8iuq1PS4m081Kx6p1IwLN93JSdksdL6KQz3E9jsKp5m -UbPrwcDv/7JM723zfMJA+40Rf32EzalwicAl9YSTnrC57g428VAY+88Pm6EmmAqX -8nXt+hs1b9EYdQziA5wfEgiljfIFzHVXMN3IVlrv35iz+XBzkqddw0ZSRkvTiz8U -sNAhd22JqIhapVfWz+FIgM43Ag9ABUMNWoQlaT0+2KlhkL+cZ6J1nfpMTBEIatz0 -A/l4TGcvdDhREODrS5jrxwJNx/LMRENtFFnRzAPzX4RdkFvi8SOioAWRBvs1TZFo -ZLq2bzDOzDjs+EPQVx0SmjZEiBRhI6nC8Way00IdQi3T546r6qTKfPmXgjl5/fVO -J4adGVbEUnI/7+fqL2N82WVr+Le585EFP/6IL5FO++sAIGDqAOzEQhyRaLhmnz+D -GboeS/Tac9XdymFbrEvEMB4EFS3nsZHTeahfiqVd/SuXFDTHZ6kiqXweuhfsP1uW -7tGlnqtn+3zmLO6XRENPVvmjn7DhU255yjiKFdUqkajcoOYyWPECggEBANuYk+sr -UTScvJoh/VRHuqd9NkVVIoqfoTN61x6V1OuNNcmjMWsOIsH+n4SifLlUW6xCKaSK -8x8RJYfE9bnObv/NqM4DMhuaNd52bPKFi8IBbHSZpuRE/UEyJhMDpoto04H1GXx4 -1S49tndiNxQOv1/VojB4BH7kapY0yp30drK1CrocGN+YOUddxI9lOQpgt2AyoXVk -ehdyamK4uzQmkMyyGQljrV5EQbmyPCqZ1l/d0MJ9DixOBxnPDR9Ov9qrG4Dy6S/k -cH8PythqHTGTdlXgsBJaWEl2PyQupo3OhfiCV+79B9uxPfKvk5CIMVbnYxKgu+ly -RKSTSX+GHVgNwicCggEBAMZcwQIAA+I39sTRg/Vn/MxmUBAu3h2+oJcuZ3FQh4v5 -SL80BWEsooK9Oe4MzxyWkU+8FieFu5G6iXaSx8f3Wv6j90IzA3g6Xr9M5xBm5qUN -IqzF+hUZuKAEMY1NcPlFTa2NlrkT8JdfQvJ+D5QrcBIMFmg9cKG5x9yD7MfHTJkf -ztMDFOwP3n7ahKRBowfe7/unAEFf6hYFtYjV+bqMDmBFVmk2CIVtjFgO9BNBQ/LB -zGcnwo2VigWBIjRDF5BgV0v+2g0PZGaxJ362RigZjzJojx3gYj6kaZYX8yb6ttGo -RPGt1A9woz6m0G0fLLMlce1dpbBAna14UVY7AEVt56UCggEAVvii/Oz3CINbHyB/ -GLYf8t3gdK03NPfr/FuWf4KQBYqz1txPYjsDARo7S2ifRTdn51186LIvgApmdtNH -DwP3alClnpIdclktJKJ6m8LQi1HNBpEkTBwWwY9/DODRQT2PJ1VPdsDUja/baIT5 -k3QTz3zo85FVFnyYyky2QsDjkfup9/PQ1h2P8fftNW29naKYff0PfVMCF+80u0y2 -t/zeNHQE/nb/3unhrg4tTiIHiYhsedrVli6BGXOrms6xpYVHK1cJi/JJq8kxaWz9 -ivkAURrgISSu+sleUJI5XMiCvt3AveJxDk2wX0Gyi/eksuqJjoMiaV7cWOIMpfkT -/h/U2QKCAQAFirvduXBiVpvvXccpCRG4CDe+bADKpfPIpYRAVzaiQ4GzzdlEoMGd 
-k3nV28fBjbdbme6ohgT6ilKi3HD2dkO1j5Et6Uz0g/T3tUdTXvycqeRJHXLiOgi9 -d8CGqR456KTF74nBe/whzoiJS9pVkm0cI/hQSz8lVZJu58SqxDewo4HcxV5FRiA6 -PRKtoCPU6Xac+kp4iRx6JwiuXQQQIS+ZovZKFDdiuu/L2gcZrp4eXym9zA+UcxQb -GUOCYEl9QCPQPLuM19w/Pj3TPXZyUlx81Q0Cka1NALzuc5bYhPKsot3iPrAJCmWV -L4XtNozCKI6pSg+CABwnp4/mL9nPFsX9AoIBAQDHiDhG9jtBdgtAEog6oL2Z98qR -u5+nONtLQ61I5R22eZYOgWfxnz08fTtpaHaVWNLNzF0ApyxjxD+zkFHcMJDUuHkR -O0yxUbCaof7u8EFtq8P9ux4xjtCnZW+9da0Y07zBrcXTsHYnAOiqNbtvVYd6RPiW -AaE61hgvj1c9/BQh2lUcroQx+yJI8uAAQrfYtXzm90rb6qk6rWy4li2ybMjB+LmP -cIQIXIUzdwE5uhBnwIre74cIZRXFJBqFY01+mT8ShPUWJkpOe0Fojrkl633TUuNf -9thZ++Fjvs4s7alFH5Hc7Ulk4v/O1+owdjqERd8zlu7+568C9s50CGwFnH0d +MIIJKQIBAAKCAgEAq2TjvY0Y9xCEMv1TrCxhYY5pyEDDldf9ruVQGH/KjBX6oP9J +EJuyl4F+I10w2heH8s+DJ49aECGyRPZdpIlnlxgPpWkj1DnmUWcXNHtDVvDsEtoP +arzcEmStRIbIOfEf4t1Bzb4aruuPU8ikgMFokO113xT1tyk/qsf5L1DIZDDk6eI8 +IKxSufHzKb/eUCEp2fY5Zcur4XVq+KbwA3l1CPR+8AjaQxB2zAqBjYxuzkT3fZY+ +2PAtt7zQDtG0c8iuq+x1Uihplq/uDeWIp6SKwG4kYsy1NfeEFy3lqfZ4q7dD3TVs +RBd4xatOh6Rz4OdSyZDlPCRdq1aIT7+wxaDzQo3Ja3yu6UjgBMXyCjzsmXx27Jv+ +O9QteNNF0Np7rDfiA1mOr2NkfapJUiItXcnLQZp3JFUkNXITwvPwSIheRuGBuMvx ++k0bxyM37nywrdM0O4/jzQD8+L/0jUGg9hNI8IPofUo9AnAXYZlWaCN6kXfbz9Ea +OXRt9pjjnxFFhlp/J1uUGkiEOwsEFGYMyQWHhKCECA7ChAGOwRmkKYV5/PM+YnJt ++WgF2YGKp+sRc/y3cniMHM/n6qFhlHEeKTSKCc5bDIO1BBJRrYlMmgT8RPxuQ7zx +csvuGQCUX54tVXxqjihdGHnKN/Vzg+8n9l+vzSZglg686UDwx73/4DuNmvcCAwEA +AQKCAgEAknXlUv42vjGD9pqZnMBT+uyaooANYoevBXx5ZGYXbHv/rwJXqnSSOXtz +kb651zRSfQAswGp0eOKClwG8ZbTxK6FpBV2CO4G6ugcRQkyu76Vy5m0mzXxTxvf3 +RF60zSaqq8+MwsbXwHAVC3CielBMDcSNfDNKAdmiyUqXOoKaq1tI0j/8R6NaEgGa +XCvUSr78J4CL7dwMpd4TqiXlZeKtSxi7PF0kPjjce2Hi8VV2/pbaspvoWrNrLd6Q +IIm83VA5SzsFyk40ZIs0LvXdP/yQgP3d4/uwQkyfuLsEzaeL2JkDyg0z1kAEeU35 +DlpOl3q1OP+zlCAzVw3b7+ILqeXu2KOLPBCoRTo2wWNKutxcExmA0oIfMSgyb/P9 +36bNsdmT6b/6C9ZeJpsXZWedx7bmlEYg1patfk1CgD6WMynsl+fQ34F658v/JWzY +b8JpqAG+2ov4j4EyRnwK267/6u/Yw4CvQw+8giKIDMv6W81b0GWpCBT7PkTKgUaW +1Hq46z6xZ/NttiTU6qMsgUskFheU7IuO1KHl1kpLBERio9S87ZxkjLGfvxl4+sS7 
+7OdgIVsM1d50RYy4pipNplQXw998kitPvNFcSOK5vnxqW5sz4SRzVAUl5sAqJ5GC +0MPqq/I+H84BBQwwviQ9WsBuA1+YW6NxfY/mldAzAjg3/yUMwokCggEBANTyvgnw +3SjBOKzg56QXRs1L2eqQzqtDzyOn2BXlMvT3bJFdqOHROhqqD0oSSbYZuCoJAJPh +/W2bqengGSbsGwKKLHDN0yPWR12QVq3y9gbK/L6Qktjp2zDhHqNgr+4SSSl0gMMF +bz6Nzn+0SO6C3m+M6hAgfsuizIhSCxBSSLccFSIT0ZiYRzq7rN8FlirpSIhd9cQ5 +B3q41lebUHpfciPr7K+psmCXO9NqtSvXtcMA/n7GyIGPBVDp8kMUwzuH9OVOkZkK +Z1a42uuYKs/zgnbXV7kCZ6iBQkt7A2Scv/IIwdeaKxfTv87e8UqMkYPuf4wgqLq1 +USoMoHdz6JmQnw0CggEBAM4LfX4PIIGT+OQG5kn54ZcsqerPljPj19EBe8U3jyf3 +OzxDkSX0f2fmBVDjqj8QRO/PcXQbGUXWhJN1DvvuJITE93Y5Pj8DJX3CfLEnjE+b ++sfuSxNawH9NwvwNt42NWDleMAgfMot3J+MwlXb31BWixCMj41Vl4VrkHEzX/M6R +aAWcTqeY59KumtOvZO/4U98VvNyOipLHWcWZuwJLzeUjvyZ+hJQ6kBGJJYNezYm/ +qhHZ5k4bz8HAUSgih+Yb3DNmREk3YqIM4Skq843YdF8SEnFw/4b7PKMfmBXy3X72 +f51wdCiSkIm9o+/QLaduExQd7AhmFO8avZa5bRNNwRMCggEAKY/DBX+sOoMTw7IV +o9IjMHhobL6ch5Kxf/0HUKauPl94IhsMlh5W39NnLobJOjBk4FdndHV8GAN0sz/Y +yN72GpXLPKz/U5RD04ATWtn7qLG/iJYBAzMJY83cQ/jf/XA2NVAWvXl3D9dvgT83 +qM2ECnOPT1x4QthgYQ7aN/JHXO2vNjp2AvldlZoBkHmvqGpljLACAq06x3oB45Fd +sLSmO1qVlGdjeDSsKYQ/HfJ4+DlecnHrulWmrPcsIGmR/TF427Rs+FiueJ+VorvN +R074nKdE6MgOYTXxMXgt3lo1oFCTPLhLRtg+LGsY3vr2f7Bx1nCdXet7juBuBUJr +GGXAlQKCAQEAxNVHMfijdgX022kX8A2Ni4x4Wj+q3rFHR3viUDnOQUC2TtDBRX/3 +gjrEU0zaI1qYcHs8h80nbIcMqY1HHjaWnltHh6IRq8KGu0fjNJ1yNc7tWLd08u1c +PYD8xysXcVtYr50hx3B+KatP6IJOFpOUAIM4WdV74+XqzZhizKn88R0JQWrb3NF+ +jM6OS7EffPs+rDuo6w4kpSlZwiIk+4GNFNv8THrKjowPeyEIPCKBuZjmkB0YHQAG +jbH6FZw/NPzidBu7GjKVv/cL1fcZKiVgrj2mbsai5MD3YWHaOQWEwTgcGzwFS4kQ +GPWYOY0nP+4wvaQECtXyI6To/qbu42UBDwKCAQAd+/H4mepB8cgchZobFco6yTt3 +qgS1D9sOFgWYZLkxIuQTQH0C1cLHXIiqf0NrJAOBbaJ+vVuCrqv2MUASaxKoDK7h +1W413yKIELWbTk7yEttw0M0T2PXmI5dNdKuw5I5hw+MmjLOFmyRzvX870ihDnM4F +MISxV7K45t1EHjMsz66fMc8BIkophwK3/7FSok5XhYdLQdQS9Rshv0PXQmffkVUM +UTlrwgH/3WGRhkFbmcdBMlawHQGvjiyZ+Gz+wF1uhzEYweT6wUfaHZePxX0hXqom +WVS6ojlUji6NNJqFN8DB4q8V5/EShj4fpdjenDap5IxFgDSxgSCShR2FGTCW -----END RSA PRIVATE KEY----- diff --git a/testdata/x509/server2_cert.pem 
b/testdata/x509/server2_cert.pem index dc20b468e5d9..753065b49ad5 100644 --- a/testdata/x509/server2_cert.pem +++ b/testdata/x509/server2_cert.pem @@ -1,32 +1,32 @@ -----BEGIN CERTIFICATE----- -MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwUDELMAkGA1UEBhMCVVMx +MIIFeDCCA2CgAwIBAgICA+gwDQYJKoZIhvcNAQELBQAwUDELMAkGA1UEBhMCVVMx CzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNV -BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIwMDgwNDAxNTk1OVoXDTMwMDgwMjAxNTk1 -OVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIxMTIyMzE4NDI1MVoXDTMxMTIyMTE4NDI1 +MVowTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3Qtc2VydmVyMjCCAiIwDQYJKoZIhvcN -AQEBBQADggIPADCCAgoCggIBANluCTNFJz8gsMgn2ixQuk4YphdLfbsgOlk2lRFx -mYBpfD2hfZpnr6c67WNIWBuvMy57z+FWcmmA2iVabEs4OGPaQj5R6cngai01QNPO -d0gPpcAW/4KuVAYOYiYWSrVOTj8aTZm4buG/VMZMUKUMS0JNXSuYLZrgD23Rsr5K -j6q2fqRFtcC89QW9opafa4oTmkp6Kz/WrphF4EsK1fbelZ8xQ4+TOkIJegZMS+vA -r3itgA3ha1xqzUU9+A4xTg8HybRzJMAbtzO0DJMzmfDXXwIzAsdsYerDgaoYlBtP -5Fnod19g8k8NIJduF8dPRfnyn8fFVisT4fWet59/1jcXUbdsgdPLuuY59sxT/C8o -HLfn26w4Wda0Sc2XN5qhXwezkPX51mOw2siP81jFeHRQE+J0IOfxjfpdbI1+xdIF -vsu42NdmYa7a7ejhilZxDYRZSaJLLYE/ZDiGfTBZVoVKRNbM0EZ7VRCN9pN6i5jd -WsHCjdq1u9rzplA0D3KrycUvlpZc7xFaJxTiVFGiJugJmTJoUpQHnF6chZsGukhA -pypSB/f+r4tPa81N5X9f9vG0WBXiKGaoWVJXmNOQHaqAYz7maO/JCetjtUn6IH7V -Ti0qK4yeVh/5GZzC7xFfTmO4oWbz6Cb9FKPSsVjvo/n2Zo0e7CSVKc9oFFBwgjg+ -p6bvAgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFNpFUJbVy5fb+Uvm -jKzuUDbuWctCMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh -bXBsZS5jb20wDQYJKoZIhvcNAQEFBQADggIBAIaUCF04BpWeQkeUsslTSN44Q95U -oNlRD19fNXWF8eae7Wl53dFkRhn2nyqx0uoHvFZ5oRhF4v8kzM1cyW4RyLk9WTnh -Lmg/jfr84bSdWvN8nW5T2jNvq0ltSY414MFu4fHf8/GMbpIKtafFkisFXmhKm8Uc -zVilTn9Wn087Lkg3FHYVU2v0oWfupM5Qvq6tvZxT2v+7nmES6Cip8Z9U7km04yxV -hDy6YFdz2UDUYlZaQCsLPmaiIxR/EclSsL6KnMW3UjMyxX8Eft1WPwvTzlQKQFDs -uEfbq+Cl+cogMaGq1VvAA9cvCUSa1hTathWayKH2q1mPH8sqtbFyged7XXh8mkkf 
-8qeYTqfeL74I405Gl3u3/EjVnhSLpOqQOgn2E5HnV0bZaJmGHdU0DIvOyKauinyg -U4hnL8WBv5en9owQvE+DrivbcG9brqEY3wot0XNzB7pxXjrWdw/PMc/HNPbBsT8s -Zg0gwxwvpffGemc1L8tiM8aHOp8eR1oVr4szuNDAbAfdEgpwBctXs5JJg81zsmGe -2jJfHFAeqwhUZgCoF/FjJ+IHxOFZx9IVwrlawPadIFgVh2I0rFUcME0B1/Vk46Gg -BOiuP9keVX+qhKtqjnfabN9l5iX+zpniHIarke2o6W7nYIgdOtdbmH4YNZxjyidj -9w/3d/4ItCavbKAn +AQEBBQADggIPADCCAgoCggIBAMqkUFp6xBzIksawPZpDQCZS+ZE/Pjfab4q7CUd/ +pN0Ss0U9MRBBnYj767qvwrOGQYdpkK0NWh+BtUOQHaqsnD+ykr1x2i27uMvYBnkn +91t8EmfW8u5cj2lZGM8SRXaiCc2tv3ZFDKeizIp5BcxLsTubRZNMTOYxUCpFoABo +d6DGYXaU6LFNbIhXQ/vTlbDa85a5EjS5SWeBzEEFgBHHQDdjh+cqS4bzAgI+klYT +uYM3PMbUuOg49hVQjW1pXSGO8Yvha0fzZP4upUKi6h1+/xBUZdDiWuynEJZuYPAC +xqgnmgSkexNLJwrg/6JPr04TfbKagffKDHKhloI62CZJ9/VRwo7OF8dsl1Y+cr3T +XjDtsxmypDIi31szDYhE5V1P+FAGP3sewQBRh597x1RA8tGE3ijIV6iqSlSo/Lpx +HaVM5n3PWY5vUNFWdbawMDuCUvi9GYn3JVnt4YUxjsMxLC39FibzUmwD7JXqx86t +ArPRLU6knrcod72dKRnUGi446Z4dElqMngyu2Z08RB5iYzDwT+xugwki0gezQ2uB +59/968+MI3eVe/QWMwtIAoZuHKvPsAHOb9bAcBvLQi94MWLbBUkAga3XRF6CNAGq +LPNdhznCNYdXMnMdBQHAzu7V5+yIB/ZFQ519Fm4RinCG6clIi326A6GNYenAJ/Jr +Xa5tAgMBAAGjXjBcMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFBfefRYaa/BmcX6g +eSQSMrh0zktEMA4GA1UdDwEB/wQEAwIDqDAdBgNVHREEFjAUghIqLnRlc3QuZXhh +bXBsZS5jb20wDQYJKoZIhvcNAQELBQADggIBALpTW6UICT8SyCY7VNUja51t3+XF +QoD8xKu2qS69G/oDbxe5SF3ldymvkQPqVwnW9e34mjxD1Au4IF8zQWv2EMm96wDD +0Js0/yAMjw6/60f0hF9lpQEqe24W+wgbRV4Fzt3/rybLte+4L9chvq1plHqQ3sfa +s99D7bfPSLX8n82ppNmm0U81kXmtNAw3P+vStBqRgrZNkbkkgsoGZsTgzKuD+H81 +WUzIqmIAfTmkDN47SXYuneULlFNWwtHgTWv4jq2/ptYo75MQq+ExwTDGM168x17u +yaC634INTjfNd04exiktBJXWmAS8K4aYgvHPgcIlzyidR7taI0X1O4mR4qomh5W2 +fVmGkpQZCmkW80whgycY3ui2fdWYOs3XGdfz53fJdN2vWebpUjw7+1owlmqFJhEe +Ct0wqLLeE8rdOfKueh3/xi7CxoeYM2fVjN4gHPojcQ3Mcs7wiJDm0WFaITi6+KDS +LmGhSHKaiXiGKsbLykN0DygDQYa/c4t6NfzoRGWKMhdAcRXicZaxnwPD65psAijv +ZDgONwXeHKgfk7DnE3rs+D9xuh2ciw7lkcbImYmOCMoV88qG0t1uIwlM3xh8S7Oa +DH6q4vj3pF63QS57uRtwCBCOa3xcYKTDJbdRyUKgAejVoz8bqTI8lBjnRTtxlQFi +4ugkg86X1370IY84 -----END CERTIFICATE----- diff --git 
a/testdata/x509/server2_key.pem b/testdata/x509/server2_key.pem index b0f6ddf70dce..82c947d70fdb 100644 --- a/testdata/x509/server2_key.pem +++ b/testdata/x509/server2_key.pem @@ -1,51 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEA2W4JM0UnPyCwyCfaLFC6ThimF0t9uyA6WTaVEXGZgGl8PaF9 -mmevpzrtY0hYG68zLnvP4VZyaYDaJVpsSzg4Y9pCPlHpyeBqLTVA0853SA+lwBb/ -gq5UBg5iJhZKtU5OPxpNmbhu4b9UxkxQpQxLQk1dK5gtmuAPbdGyvkqPqrZ+pEW1 -wLz1Bb2ilp9rihOaSnorP9aumEXgSwrV9t6VnzFDj5M6Qgl6BkxL68CveK2ADeFr -XGrNRT34DjFODwfJtHMkwBu3M7QMkzOZ8NdfAjMCx2xh6sOBqhiUG0/kWeh3X2Dy -Tw0gl24Xx09F+fKfx8VWKxPh9Z63n3/WNxdRt2yB08u65jn2zFP8Lygct+fbrDhZ -1rRJzZc3mqFfB7OQ9fnWY7DayI/zWMV4dFAT4nQg5/GN+l1sjX7F0gW+y7jY12Zh -rtrt6OGKVnENhFlJokstgT9kOIZ9MFlWhUpE1szQRntVEI32k3qLmN1awcKN2rW7 -2vOmUDQPcqvJxS+WllzvEVonFOJUUaIm6AmZMmhSlAecXpyFmwa6SECnKlIH9/6v -i09rzU3lf1/28bRYFeIoZqhZUleY05AdqoBjPuZo78kJ62O1SfogftVOLSorjJ5W -H/kZnMLvEV9OY7ihZvPoJv0Uo9KxWO+j+fZmjR7sJJUpz2gUUHCCOD6npu8CAwEA -AQKCAgB1i31B0HLlN+EadCEIsCPoMH8qPM+eKFAjBtUT9xwLRfu6veFPZhqaB8tq -TyQC43aB/MFnivqTeut0IixFhgFGSiph0prXXpFIG3AOkaH+vSbYcBZ2KZSXKZN6 -D7cXyVuX1bp6DjEzreJAyeUXNUxCbdyewsh04Ai3UBSXt2tv2PUiDeWyavTzw49w -aoMSxII3HVDgVElTXQNizlrZ+X9d7p4dsnReWw0y9nBc5XB3hyShXGpULhEHC/dc -hN80VPuAqHcHvHQQaZgaxFzGzUg5wiYQddGBv2wL7vmywkArMvfGAn08q1YhR41n -XL3x4G7s6wwogbk4tjOC8PN4GQ09YxbxJVLSyVIHX/v8tYe8H8acsw4LonkawZVm -HOgwMpz/hcm7P+ClYjAVUWZjCJt02svDV9U1BPEdBtOXrMDwlBfVuFxtM4GDkKmZ -GjCLnthpvBXfw6stDKuwE9g+TYVcRMsPhksjE9ZasTTVtFU/qZXhc2bDuJkWaUAd -yAtxBOQYF9mBN4g35NSE7k8FE3HxNDJx+zstodweq6qhinXchuKAeViap94tneeG -hoSt9PgMnOnx7V0wIK7DaGCH3ssxbjRQ2wRLdTNYAzhV+tkeDex2zf2xtOvqtWIC -l5gUSTUnaEYX5wVbCPAJIOAI1TtMe501PfXyZa8wb6p9eSHMAQKCAQEA9DJPKkjI -p+FLBn5iFgGE6DgiFD+K6OyGtr0Mle8D+XFDtlZClF1sjZHacb3OQgDnlSYXZaZR -iN5jKuVJsrhCgm9g0QYDwtp+m7JMMX9A19qZbbK71w9Qi80wuRZze0nktr6RKiyS -+x8VXkeSHPUSw7VbzbE/CCm551Z/ORoU5fXnDstPKk/M8K2NSYywwzwaEkEuu0NQ -/syGxaAW8mThruDAZ4gtJns6IyTmM+8KgkSnbwK5mlOMPhJ+6bHDyeV3OJe2lSVW 
-ZRA9kzDFAKlotpwRaSwBdu6chCdDhQGn/WlofJHCt2t5Fh9mK89AXQsXfjAh0O1N -7zrU/yeNIXJd7wKCAQEA4/CD665RVUwNffb7fa0vnt6Rkj47FdM/BmWpLnb1IC7L -87Fe9uryaNtghLD6T87vF3MtH2rEfQ2qwR9VRC4MyB5kNvozBVtJbKLy2oRD0/Lp -GSLhjAiKrzu8Dmwv/5iQhrSRr3mqn/eoIx5ydgot/+OzxgH5Q4CGYvzZUcIMVpi+ -eq4/39vLPQoa5tvT+n0G81sCCVR+sBtBbgVq8WaiqW6UunqP+B4+bPG6jbYMjdcD -w+ylakjJdAofl5SqcUcUy0UzI1pEjKnlLYyCyuVMlkhVZoaQiX9TTOTZ0jAXnbps -sDS0fwW1/8J5cSXxIA3q1WVtshst2LwwaCgYlVhHAQKCAQEA0x+v7BnzSZnx+JJK -EUaM9wyZAjKR0aG1Msat2+9C22W+qiVX+Nfw41EHsLDuY4hOsFe3gM3TzmafDFYi -ap79+bF73hu6IrwvHEOBtoWTtUusvPf7iQsXk1b62fr8KsqPMCQAc5sIFI8iNVnh -jKGh8Iya63Jj0ZXpwYW6Bs9y5AK/Gr5SGn3V7PvPnJhDtvf+fmvWkFa57yE7IB+x -1y27JSvxjVFh39RIRlw/nwT7a/cZX1PWzgOPy5bIHRnw8VwvwEECvV4DnOr2oYxX -tqPBAahbMTe3qHDR5zvfF16ANArvKEwJMfV8QdExz4ym1Aqj7BiHFBAnAj82Kcez -MAimBwKCAQADw2LKL1SUbe8DF2LLjmJs4wvQOErNb3Fo76C9baVaZKtlWJZSyUo7 -RPPw/OMFEkuMPZCPJjocPm+FRLkpqQD5BNduuO7CteEedApCZVChXS9QBO1oXHO9 -tOTD8DFSrPgl4TFOjlmszm/uNIB7Rmu//8hmCn5NCQAu/jGwUd3WSCtM5zeSwJQ4 -a8RJ73MufYXx2pzL/qMg0TJhWKGNXr5swbCe64sY85bgQZVs5YaLiPM89tk8SftZ -eRlQbVnrCNtlB71yZfkfwWZRPDKkmuiKyqLuUGZufrWnXVfjSnv5VKyatCQOvM9m -a5WJsrCqcNBhuYz4Fc7J90FtVswhGxYBAoIBAQCzNj4K/OrC5fX2sidbcaEU/9S+ -r8JZeCaxAAFepoFKE0LyspNrsW0CZ3Ana3B7SqfH3nAFLoLxExCgMm1WkvwLp22X -23Gav6cRG4XJjZjyLKW+rcowuhI2Hb6FE2UvshcDzlHpkISpjeY62Qx5gcoLeLlj -eQpqg59wL5ZweCOcgV/K2nrOILlmQR/GQ68XxvBLoj3J46fc+/iI8G1roGI2H6n6 -tRqmOxRFdmchkPfLPYq5Z71LTWD7m1E27k8apttT8P2mfQhZZ3YYERyiRTTYdO0i -0ZIi5+OqzTuZuefgurtHDnJe4rFT3/jZzKmI3IfbuRITxmxSgPd7cpuM22uo +MIIJKQIBAAKCAgEAyqRQWnrEHMiSxrA9mkNAJlL5kT8+N9pvirsJR3+k3RKzRT0x +EEGdiPvruq/Cs4ZBh2mQrQ1aH4G1Q5AdqqycP7KSvXHaLbu4y9gGeSf3W3wSZ9by +7lyPaVkYzxJFdqIJza2/dkUMp6LMinkFzEuxO5tFk0xM5jFQKkWgAGh3oMZhdpTo +sU1siFdD+9OVsNrzlrkSNLlJZ4HMQQWAEcdAN2OH5ypLhvMCAj6SVhO5gzc8xtS4 +6Dj2FVCNbWldIY7xi+FrR/Nk/i6lQqLqHX7/EFRl0OJa7KcQlm5g8ALGqCeaBKR7 +E0snCuD/ok+vThN9spqB98oMcqGWgjrYJkn39VHCjs4Xx2yXVj5yvdNeMO2zGbKk +MiLfWzMNiETlXU/4UAY/ex7BAFGHn3vHVEDy0YTeKMhXqKpKVKj8unEdpUzmfc9Z 
+jm9Q0VZ1trAwO4JS+L0ZifclWe3hhTGOwzEsLf0WJvNSbAPslerHzq0Cs9EtTqSe +tyh3vZ0pGdQaLjjpnh0SWoyeDK7ZnTxEHmJjMPBP7G6DCSLSB7NDa4Hn3/3rz4wj +d5V79BYzC0gChm4cq8+wAc5v1sBwG8tCL3gxYtsFSQCBrddEXoI0Aaos812HOcI1 +h1cycx0FAcDO7tXn7IgH9kVDnX0WbhGKcIbpyUiLfboDoY1h6cAn8mtdrm0CAwEA +AQKCAgBa3LaS+304Es+Ne7UDmKgJByeUczEoxi9Bm4AbqSZ5Yksz/q4jReinZZ5b +hTfeW5LCbxlKHzSL8BMhClvjDaa6AQ4/F+/mlcfUzzaH2N3XDZkLKpyfOK2tZR/0 +qZKwERQoP4IcO/XirOLeLEnnQwFjYsodtBa/GNmDOtj1leIeGxXUoAx+g+Lod4iq +QENcm7ChoraBIZvCZ7b4aMj2L8uhimWDx7k593itHPVs10dViM0dsoB+0Bu3jvj7 +WEVEKN4yBI+gIYjlWHENohMryqf/4HgO45A1kOulKDUbKYN+HtO2xTHSgt4syJqX +YveOILs5/IHOY7CVLdNY7Z3B/WTKtO4UCzGtFWsD0Ai6rtfnSj2aAmq5uHWDwPHl +3fHdTK6knHdlaPWbeQiBIk4bT6L/JjH38dqBU17/RathjoCDQngNmid7AgSnv5o0 +5ugTCTzzFTUz7FnA9uYcEWIq7xDB5gVFTcvWcARYd2BsLM/9gF1Hh7r4A8gsHj2i +0+7Saw6mvAsPXp0JH+8idBk8khV8v6Uy1arF5aYF8yNus/hVr5fUBnnK6MZ+F2Nr +VOa49n5BhbWm/IsVYdgnnk4uwUx4yNuwMZ9/nSsDEZ6IiX9KZjap6zKNwhoodLV2 +T9WYKC7JMOEBr1CzoQL9JzvyYulZUp0F5SBMbB9kZj6j1gQRGQKCAQEA5QbA1hqt +0iy5KjH26Rok2pwi3z49o/sFPyg4TBOs5Zx7T/iBQM3ZlyOUdGT4uYFX83Xljbo1 +As/UIM1wzCSUbyyHGs7RTuoAWJ35d94UwdfOmw5j5ETAtbA0EVxnLCqbp2ArqC60 +UT/M9Yk9HK8bj6wwb0kZ/wsfwn78Jts8GhtCozPg/c2KWq3ce2bcHMuGhlaWq5Jb +XlHrZBIoL1tYsT8LWe4wc8Agm/w0ZC4rM6B0A8rxdsrPAc3WmlvNvnA8DA7OAJZ5 +j6zThsQ6FdSic8CI6p5vw4pyFZXtdERIjX2jkWVLVdb8adQKhqcglBvYZt5e73tk +a1OfgG0tX3M8TwKCAQEA4oIOLgVJWc9lfWFaLiRIjKlVtnOqfR8Vy9e7i2dBylgO +ZAoVM3ROYLsbDRsMJqMN/IQIuSOMYP+lIeeLoJlcqor49+z6em45Kbt5ZiK1VRoG +78Zi/UZ6cb/vcB17zmeBuUHYEmHLbB8PWbL+qFEipVDAf0q1jD9VnRw6ntt5K0oM +AKPH7jiCxfo3GU62nOZgcT1rwnA0FHBl9vvcr+237a/+NoNkRqQxTX4y5kA5l0nP +9EEWYvOlCKbkYiPKHOZMGZ4MWb0FlFp46KPxiM/x8XxB6NFUacEJUKE27NYgj69+ +5C62A34YtLKptD2+CeAKqxYOYIt3Tj4qJBrJ4m6OgwKCAQBxoXwjvnDnipEEQm4D +EZmfbUBQCw2CQpVD1Ky58jkiYxU7hEx83qVKu7h4V3CgeXAttx0ByJVso7jX3ZZN +cwjCcBFIV7y5rpglX5vawTEDTBOSEv20z/fdLWNoCbSW0T0ROkHu291TQphqaoEL +rkW6bvBJBrgDNn23flGU5clYGpZhaugChOxUOVbfUxV6o/BGzsdKsP7sOTDVIb0W +YfgLWQBEykz34SdMvUExQ0bkAoQNLa/IBK/YcUw8obfe+MiSIvZKjF4bzt/USZ+Y 
+HTvMuoY0Ag/psNMRqqV5vjdRHDj/doZ+PIBX8YCXdmxPj9E6mLH5l/sm1QKaMZEF +fqM5AoIBAQCPe8lVt72Wab2lpgTFU/CtQhtsv2qRZh6diSRhk2BmuE8tagGyHYwE +1KG3NJoG46VZf54zAWTMkUTe7FlTu7KqyewayYCGC8qkOAEYBQaPSTR5sVdFj97C +rc4UXGjwADt5yk8AnfiJnkdQEAYnQ3ZJ+JRoTkAg/oHSS26K8QaZuIdP5HAi5KNa +nD1JB8bAL2OKeFkJy6ACDo1Y3oUW4ORxadoEWEkuQpaEu1us5aRVxMk5tf1jY2n4 +yBfGX1uJ4Qz18VtrgUTGjGUpIalAfFGMIqVxwSDS+RhYfjdX4fCwdIBSNZDRN5CY +7tB3v+DhSo4XgJpM6CwEYXa6dknK6TPXAoIBAQCo/+MThNIrKwIsbKxyz3Xzg4Ut +6JUQxd3YU1kNcNde8BSuNp+05RcUIc7wpRTrrJwAc9uNHVNjKz73WcpjMh3dHYJY +VF0nbUzM2m+KzLdTJtRTYMaFGJjiHbttetNGJoKomNrMea/vEjZh5WadkBagKBGp +u85H3Ff1vYdMyCTBiU6eyabxc08/ZEaFJaQALjVC4e/mQdluCHyfmqeY1awKLmCK +vf5ZvBC6ISOMMibqcRT5ocjAvO3j+3d9Ce3ExX5U1fu/xYb3YoWQtdX+qIAT2KYq +QG1vDy0VieGJlUiDPooWin/X830pxyYJ79w7XN67JIZdlrtIEVDJe4/xghXt -----END RSA PRIVATE KEY----- diff --git a/testdata/x509/server_ca_cert.pem b/testdata/x509/server_ca_cert.pem index eee033e8cb05..2760c56b4335 100644 --- a/testdata/x509/server_ca_cert.pem +++ b/testdata/x509/server_ca_cert.pem @@ -1,34 +1,35 @@ -----BEGIN CERTIFICATE----- -MIIF6jCCA9KgAwIBAgIJAKnJpgBC9CHNMA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBD -MRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTAeFw0yMDA4MDQwMTU5NTdaFw0zMDA4 -MDIwMTU5NTdaMFAxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEMMAoGA1UEBwwD -U1ZMMQ0wCwYDVQQKDARnUlBDMRcwFQYDVQQDDA50ZXN0LXNlcnZlcl9jYTCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMZFKSUi+PlQ6z/aTz1Jp9lqrFAY -38cEIzpxS9ktQiWvLoYICImXRFhCH/h+WjmiyV8zYHcbft63BTUwgXJFuE0cxsJY -mqOUYL2wTD5PzgoN0B9KVgKyyi0SQ6WH9+D2ZvYAolHb1l6pYuxxk1bQL2OA80Cc -K659UioynIQtJ52NRqGRDI2EYsC9XRuhfddnDu/RwBaiv3ix84R3VAqcgRyOeGwH -cX2e+aX0m6ULnsiyPXG9y9wQi956CGGZimInV63S+sU3Mc6PuUt8rwFlmSXCZ/07 -D8No5ljNUo6Vt2BpAMQzSz+SU4PUFE7Vxbq4ypI+2ZbkI80YjDwF52/pMauqZFIP -Kjw0b2yyWD/F4hLmR7Rx9d8EFWRLZm2VYSVMiQTwANpb+uL7+kH8UE3QF7tryH8K -G65mMh18XiERgSAWgs5Z8j/B1W5bl17PVx2Ii1dYp0IquyAVjCIKRrFituvoXXZj -FHHpb/aUDpW0SYrT5dmDhAAGFkYfMTFd4EOj6bWepZtRRjPeIHR9B2yx8U0tFSMf 
-tuHCj95l2izJDUfKhVIkigpbRrElI2QqXAPIyIOqcdzlgtI6DIanCd/CwsfdyaEs -7AnW2mFWarbkxpw92RdGxYy6WXbdM+2EdY+cWKys06upINcnG2zvkCflAE39fg9F -BVCJC71oO3laXnf7AgMBAAGjgcYwgcMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUBuToaw2a+AV/vfbooJn3yzwA3lMwgYAGA1UdIwR5MHeAFAbk6GsNmvgFf732 -6KCZ98s8AN5ToVSkUjBQMQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExDDAKBgNV -BAcMA1NWTDENMAsGA1UECgwEZ1JQQzEXMBUGA1UEAwwOdGVzdC1zZXJ2ZXJfY2GC -CQCpyaYAQvQhzTAOBgNVHQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADggIBALUz -P2SiZAXZDwCH8kzHbLqsqacSM81bUSuG153t3fhwZU8hzXgQqifFububLkrLaRCj -VvtIS3XsbHmKYD1TBOOCZy5zE2KdpWYW47LmogBqUllKCSD099UHFB2YUepK9Zci -oxYJMhNWIhkoJ/NJMp70A8PZtxUvZafeUQl6xueo1yPbfQubg0lG9Pp2xkmTypSv -WJkpRyX8GSJYFoFFYdNcvICVw7E/Zg+PGXe8gjpAGWW8KxxaohPsdLid6f3KauJM -UCi/WQECzIpNzxQDSqnGeoqbZp+2y6mhgECQ3mG/K75n0fX0aV88DNwTd1o0xOpv -lHJo8VD9mvwnapbm/Bc7NWIzCjL8fo0IviRkmAuoz525eBy6NsUCf1f432auvNbg -OUaGGrY6Kse9sF8Tsc8XMoT9AfGQaR8Ay7oJHjaCZccvuxpB2n//L1UAjMRPYd2y -XAiSN2xz7WauUh4+v48lKbWa+dwn1G0pa6ZGB7IGBUbgva8Fi3iqVh3UZoz+0PFM -qVLG2SzhfMTMHg0kF+rI4eOcEKc1j3A83DmTTPZDz3APn53weJLJhKzrgQiI1JRW -boAJ4VFQF6zjxeecCIIiekH6saYKnol2yL6ksm0jyHoFejkrHWrzoRAwIhTf9avj -G7QS5fiSQk4PXCX42J5aS/zISy85RT120bkBjV/P +MIIGAjCCA+qgAwIBAgIUVaYPCm+rhznxJTRWV7wJKkmRuW0wDQYJKoZIhvcNAQEL +BQAwUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL +BgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3Qtc2VydmVyX2NhMB4XDTIxMTIyMzE4 +NDI0OVoXDTMxMTIyMTE4NDI0OVowUDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNB +MQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRlc3Qtc2Vy +dmVyX2NhMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxo6Cn80nk3i5 +PgmYnMBicmJEykEz5YbJEuyN+Mjv1wivqc23P75qvu7u0FPePptHZK+Q3PCnv7BZ +jc+MDQzZhUWN8jwenMGOxpVrX0zjK7Q0u92YbrHgxE9fkRA5fZcXGzZlrhsJQJUA +G+0QGCSzjWZvSab2JrVn/gYEzikcl81Q6zAJkTI9vACZC0vnTc6XsVC8QCpT71fb +qQwE4Bvr1tyuA6biB4H40RiLGWuG+8BoVn1pgSL/9GzRnsEnSN2KCfaqzk9VMDnP +TQRx0yJY+Zl5FB/ufeEJH8hh1OS3dAJhR7IYLktlm8S68dSI/oTs811BWIw1dOqa +KbpElXS5Tr9usGOehxy7q6dlazj2+nDzIhQ/20koX0dqyN1O8Pzi4OWcR5YQEBDO 
+8Bp9v6JNowwbMkZGSg/C1GMNwN4rEhLlAgpv8/4CoZwlQM0oROWiiZwczpVniDiq +6dYtTUhuJwC0cgJLSswDXpnAlp30hPB7EV5MIdr+9ybuRAx59Nl+ZB8g6utuWNaA +lNTrAsouwWBalHmY/f4/ltEnHkwgKCReYFHpDNuDVtnxhtEfGzd5IxQWNl2etWCR +Nnf7Z3DQHLTduIQD2R0qp73tqFK8T1DR/HZkbZnZPvBqmUIXvCnHJKKB3ntrkpqR +bQHMq6Tkv3NL+N3XpZwEz0AOeiRE+gsCAwEAAaOB0zCB0DAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBQl88evytJrL7t5BGRbJUeMOW4jaTCBjQYDVR0jBIGFMIGC +gBQl88evytJrL7t5BGRbJUeMOW4jaaFUpFIwUDELMAkGA1UEBhMCVVMxCzAJBgNV +BAgMAkNBMQwwCgYDVQQHDANTVkwxDTALBgNVBAoMBGdSUEMxFzAVBgNVBAMMDnRl +c3Qtc2VydmVyX2NhghRVpg8Kb6uHOfElNFZXvAkqSZG5bTAOBgNVHQ8BAf8EBAMC +AgQwDQYJKoZIhvcNAQELBQADggIBAMGqi2F7ccNQ0FSiALPUjO0VvQrUqdWLrc9Z +67rr7wBu4bEzchM+HQP9GwbnSnH9yT0pnYj2H6idAfqTww1kKuR4CYMkGsNJ9PYW +AgYdrC67HKT2xhy9YmrUItIe/pM6rRO6oNA8Np3IAEmC0gpVMqmPqHeLvwhxcf4f +izsi148gTGOxBIWVNupImFrOaztKV6SbVwA+wdHNJvXz4MEEYlMlgHFfkrAEXHfO +6QmHXru8C0BIQaMOiVZDN8YCwmsrcGFYjHFRS/OnYblrRxuVDdhpMmNiQRJLhZHi +jf6WOpJS7o50FmC8bG1CE0CqMNF/qz3Hap36Rm2w/xSems2dIqMr6FsH34KkyXzm +pCHN162g720orV1uExpgfRSfv+IaklN1sM98WkTqAkz9p6OPPEo4VHeVGUk/mFuv +aVnByrk7qmpTLBGDk7dFI0GjsNwOz619omgYZGliRU+7rDXP4fN6EPlF5sQO7MJX +REOSZvVcHPpIAIqTFRR4SBnwYGsEPQbTKTH7jJROg0TGmiKeN4N1syb4KNos2Wfp +ZZB+f2qmn6LXS6d2kI692UomRfGVNoBEsAhWBW7FzpU/WnT+aF97VpvWUEqzg/AS +61tKM/t/ap6kNTLaPGSWTk9Ade/KuMmg+nSrL6S1Xa0T1rl2Qjd5h7U6JLWHL94v +GPxPuBsN -----END CERTIFICATE----- diff --git a/testdata/x509/server_ca_key.pem b/testdata/x509/server_ca_key.pem index 114e2a37a11e..1f69f3435663 100644 --- a/testdata/x509/server_ca_key.pem +++ b/testdata/x509/server_ca_key.pem @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQDGRSklIvj5UOs/ -2k89SafZaqxQGN/HBCM6cUvZLUIlry6GCAiJl0RYQh/4flo5oslfM2B3G37etwU1 -MIFyRbhNHMbCWJqjlGC9sEw+T84KDdAfSlYCssotEkOlh/fg9mb2AKJR29ZeqWLs -cZNW0C9jgPNAnCuufVIqMpyELSedjUahkQyNhGLAvV0boX3XZw7v0cAWor94sfOE -d1QKnIEcjnhsB3F9nvml9JulC57Isj1xvcvcEIveeghhmYpiJ1et0vrFNzHOj7lL -fK8BZZklwmf9Ow/DaOZYzVKOlbdgaQDEM0s/klOD1BRO1cW6uMqSPtmW5CPNGIw8 
-Bedv6TGrqmRSDyo8NG9sslg/xeIS5ke0cfXfBBVkS2ZtlWElTIkE8ADaW/ri+/pB -/FBN0Be7a8h/ChuuZjIdfF4hEYEgFoLOWfI/wdVuW5dez1cdiItXWKdCKrsgFYwi -CkaxYrbr6F12YxRx6W/2lA6VtEmK0+XZg4QABhZGHzExXeBDo+m1nqWbUUYz3iB0 -fQdssfFNLRUjH7bhwo/eZdosyQ1HyoVSJIoKW0axJSNkKlwDyMiDqnHc5YLSOgyG -pwnfwsLH3cmhLOwJ1tphVmq25MacPdkXRsWMull23TPthHWPnFisrNOrqSDXJxts -75An5QBN/X4PRQVQiQu9aDt5Wl53+wIDAQABAoICADoDco6TNRZ+PtdoIVdlfd93 -/wNQw+mPpF8tV2wsefZc09gT8auQv0az0nb7QZsrrpBUkB1Jxk2Ub8mob7fn/o1R -pjanhlfmyoe2VhjFcRwv/n2pWpFfjxixB2of5r/EWUwR02zwTkFUfsWAVgRI1hTf -Xk3BZGah9LC0LmfeboEDHW+Y6XtfCSYsQlobXp7wYMZ7MSFubWf7aa2Q3N5d/MlG -RqYVZ3fCVHnioMgiJkvDG4d0aXnyvXpTarBkJMGjkVwjJ40dIU23cBhOW0alW7JY -t+S4q1waDYxeR5HA7O8gykCeYZ4wSo+ANpD6q+h+uYchLLmh93fDfwTxFU8BhK6a -Dp8ikyZe7hjEba5a7ZvfOXedOZoLqGuUF4P5wI0Hfdslqwq34QSqMiHJuQGa+dM+ -tqnxTw8TjylYysMJxkqipA91uhO9AWxUc37jkWOY255kXcQdKwx5TdQN25XDDjK3 -BNiGtWIEuRMoflO2tL8AmaATOYbVuC3rSm9vtK0jre09MwLxihuzd8fgGBrtEx5S -UMaBAGDG1F0lcdxQY/h1byL5g1y//N472Ir0PLGczMPBigy+ZEy2GNtwUniwWOWH -z8CE8BbCr4PMxaqR/qU4hmEw6E3mB8w0WMMGQRn9+jKwxSZaIsE518Wa7oVEx02d -LZOu9b4xNslw8HjwaSKBAoIBAQDvas21s1EhtgKZaLXNyVsWaX6Mg1h0puylvqlg -G7t7F7XRV4gPb21e65y29V42vG/r2KB/AJh8eHTFYrOPSPPT1ZZxfxD7yuJGliYc -LwMU9QWkks5bFEP8nHogBv5nA47Ve+ctgrkwhZneWS896EI0Ulzw90oeOYgzJAmP -u0IVx6k0SlYKw5b31xWdwRehAIiz0UFufn88QtM3Fhj530It/+mqvrT/MR93XIIm -0tFLOIGz0Tp4yLleB1h//9xFdLUgDAGXgyC2ivlq5H31rGwkZr0Ixiwm8VOq1yvF -/ZofDN37RIrIbC2O0shFbU/L4KC99Uu5gDk/bu7INwLrmK3JAoIBAQDUAMNz0Ewg -cR1hlJ1mDD0IuKHjgjwoOslJ+r/P9tMfXkNudx7IRrnsNlJzB0RYYFaZiyXi4dXn -nN1C1ePIXo/kfw18Bvl+GIUrHV9EZrMJ+OfdWyXOzv6kfkT4B+axUJpqCTA3aalr -+mI+EpSjw5IHzgEL9cZBlms2YSu6cDxKYXm8sjQ7w0OKSQFsdv4rIfT+xwVyVHMW -1vn2tYdxnnidzuGUFt1Fhx8SnNHSu6K3rvjoc80jjg3TuOeKtil5AwVhtxqpX5HV -XAdQwFSZigSkjypnvIlJ9YLVl+64U24UQXBZc3qZdImZqKn38Dfalaiz52CWZPtt -N6HFzJTAjcmjAoIBAEgWe4wLSxGAcTXp3lvxFfkgmJcMcVTmcfjR+MPUddXxZLB8 -z53+KgtbnBtGtDB8+qIj3ud+sWtBBb/tIS2yhKAy/pJ79QwroYgpa54u1Zm40RMl -lPa9ml70ap08Hdu8qYREQ25jnwkqIRNe/SeByHVim1N+0hVZs1XasvpRIuvV62+w 
-NkoVbF6Bp6ORYWD7/S1Pg4kWk478fAZpI+oQvCeHl77unyb7joLtGs8/yP8CK6OO -CzIVFiNmyNH5o0RSiLr2goAxXmc4XzM9S2Pun70yJhb/PIoZPd0B3s9FteNFh41B -rRv93pXTh7PH3y//Gcc4la1sG1CrQUCNt9ZiaWkCggEABHHXpy/wyKVWdltFSYRs -KyijzD9Iv5cr7S8ioluMZZX2V/SLYquI7ljdNagrWKb8ac+vBaiycV6qjOIrGmJR -Jfs77yO+S1R8RkEhZC+7BTSAt/VXP5S7Zft3urN/tKv58MsshZzjfm4LbT26fAx3 -nU5GW1fVxj4/FS7IWepMeUq94KTjz3Tyj42kR//eqEzX9Bd8F7+JgisTpoZ7xngK -E1TpCc/I59JDZoJ/K6nfaXZzpXv4CwzJYWz4/cF/8ReNH1VVa8OjLRP220yM+YMZ -QdH2k6IyRqitC4lZ6edl4WrVzipLobf9woj0t0wD/8MvfEYXkk+frdSCwcDeRYMz -fQKCAQB/kbirzZfwH60I6FIlCHohRDJ3csBU3n94UqSOqi/zl6ts+Vgrdq/36UuK -lww51o1FmtnpI1jaVw8Sug7N4xYUkgCmyFvLU3SUOw68xzPxi7k9NwI+M1jH4ZMK -JVJXHaxx2bY35rf+y1NKOge24uw//C1aEmKq4Dolql6ZiJlVGUna9lp+VmcDa+XW -OzGfJWMZeSh2kI8cJrTCrar21zRfF2c6IsoKdDBAmZV1qSgzymzUYtYQ2P1s+qRS -Cs891gpYRQMchfec7FefWdFYXgEfLRp+nz4WoLaIwK+oftPHl96V1z9rS1Zs2HXD -okA9YtMucwgrhGFv9T0QtBuq4aEC +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDGjoKfzSeTeLk+ +CZicwGJyYkTKQTPlhskS7I34yO/XCK+pzbc/vmq+7u7QU94+m0dkr5Dc8Ke/sFmN +z4wNDNmFRY3yPB6cwY7GlWtfTOMrtDS73ZhuseDET1+REDl9lxcbNmWuGwlAlQAb +7RAYJLONZm9JpvYmtWf+BgTOKRyXzVDrMAmRMj28AJkLS+dNzpexULxAKlPvV9up +DATgG+vW3K4DpuIHgfjRGIsZa4b7wGhWfWmBIv/0bNGewSdI3YoJ9qrOT1UwOc9N +BHHTIlj5mXkUH+594QkfyGHU5Ld0AmFHshguS2WbxLrx1Ij+hOzzXUFYjDV06pop +ukSVdLlOv26wY56HHLurp2VrOPb6cPMiFD/bSShfR2rI3U7w/OLg5ZxHlhAQEM7w +Gn2/ok2jDBsyRkZKD8LUYw3A3isSEuUCCm/z/gKhnCVAzShE5aKJnBzOlWeIOKrp +1i1NSG4nALRyAktKzANemcCWnfSE8HsRXkwh2v73Ju5EDHn02X5kHyDq625Y1oCU +1OsCyi7BYFqUeZj9/j+W0SceTCAoJF5gUekM24NW2fGG0R8bN3kjFBY2XZ61YJE2 +d/tncNActN24hAPZHSqnve2oUrxPUNH8dmRtmdk+8GqZQhe8KcckooHee2uSmpFt +AcyrpOS/c0v43delnATPQA56JET6CwIDAQABAoICAQC80PSi5kMGWD1AI3v/RGvZ +/l0QQOULFhvMZSu1M8/wGxCBV2E1uuxj2W88qSSlQKCpvNLzZ979yMPAuWejWV7Y +/4W2nzk1NFODwL+0hrdY7itfo6C7U2g9BoYIuvcQ2Udd12LmKEuqIIdUByHQ88XT +Z1/ZGG7n7IaR6ENVkX7hVJvoq2vNqYtPZvoi5fF16koSkoYSNq5O4qu+m/Fe9O5X +CtBoJKC5Jv3oSYCtkbVxXk1aQjS8Wv4v//NvFps3DYWhZ/KR8ps+GxtpUBq1/unB 
+ohKj8qGnDwLQOIvgGgfiyAieV1vrWkOr1283XTdRYjK6Uyo6/Eoxfo9PsxRZVACK +lpkRGn2p1GTbx442INrOwhJbJtKAcSlM7V8m2dAhgMTXXJzPtl0eCNWGy0s1obpK +1p9qTUz23s3WA425TAXQwIFpq0CdrXPgaXmZYw5LlfuwQhUmw5SQBOg8dX30Y5rb +vrrBBj1+2kAEYyeC3aQ3HwP6qAevWtmlbyCLMS5pYcPTRIEJlVX7OMCZSmvYnOku +0Vghgr2g+FZjNgQMo0HYEq6bb1jmpnN6tY4y3H7Zq76xavkFBWzaz8aHEB1koBoW +e0z+AaojAXEMaAKzsA3hF74HZA4kZf9kFi4er4ZLlnZFrU7UCN99mQfsV0LU+75E +rpzoRVYE8CZN9egiphZFYQKCAQEA8AUt1mi3tDfbyuDnut9KVkraCDAQ5WX+31H9 +BTPdIoXaeYx29zCKSEaBAMvf43s2pDgVPVyuc7KVFp8l3LZZ+znNWxU3eVznE2Ua +1QUTR8ZxueUIyOZxuSHU8xqWvxZ7SLZYTqTVmYGpORjvuQIgS/+6GrAbCNcZLNIo +I7U4/Nx4zJ3bkF1xaKJcaVXQRwG0cHQEZZdgkaE+J/ak3WTRC/11tEr4agGo+UDF +6XBQryp3HxPMnZyAv44zOB9gqD/FvnxH6S5ybc93eISQXZoJQuuad3GuEn28Gz5x +Bsr6zzSkhsTp2cEr2AVSH+b/SCseZ/2JEE0mjPxgjXRVLQrnGQKCAQEA08akFJjH +nq0aEcL8HWbUE2PuuXcq/vUZHFNv1gG+e6vZ7gU7bq+5SS3+APuerPQrhE+3mKsn +WKUfkr2Rgn2Yxieo/u+SKMbDaNNT5h08KgA/joewRYDeInACw9QHsKPKArWmWn6d +6fHYl5d3rxUPGUaM0fsdOvayX+xeN4PDn+puqzInhh3TFbeyvkpu1b/xZZ17u7Xz +yvu1PRHNt5eZB6QZ/GF3d1r63CTC7m3GF/wikoV9Ygy0Me8gw8WxqnF42GFRyUIa +wHXfb3w4Y4rZ17b7x5H18FF4tHMgXN0pNbGk+PoIFB3Erzfqi95xVyZh/MROwFiL +qy2KeRoniUbCwwKCAQBDvxJ7DD+dzI5rKyP9KP1Qcfwsh3SdazaPThL+nu7xyZoq +6KzDhJ3jXJMY6HKfQK3hmDrWgQx0d5mBMxZ6v7WSJXSDGu/3f3Nxk/4I1k/k2GxN +LgpWukSrHpN+sqiN8wiFM4KlX/0yQNjE1vcC30jCasHauo5G5n+imQbfXU1igdBO +4NeSXe2evQUcbi5FfIOzoeuDyUBmmn5yxTkvjD89BSNt6iNHuIQ7Jj82bo83geLx +kKMWcZAdgUOPubuMgcOMyoN5m7SMrhxolfIxmUK38sw8noeljHvFrNA2PKCiT5eI +upfO8KkxZf8SJh8z/YetjnBbe4tADBQsmQNZnVQxAoIBABNKmxPNPxHzTtajXngH +L/Z8OfjnJCGJjjoIV7209vcpFncaPum8VDKYX/US9sdmjrhE0sKzhKgMkq25WxH6 +Avq6Dij7BeN1B8P6zD/AFgT1dNS1A5exP4r/jSDtpa2vne1VQswnkJcJEuPsRljK +oE97H8TZDTab1m/qhkKkXCOrJV2u+e67tMjbrQqsmSAblg/dorHcx1KMT1w6zPSW +eLg7eKqG7m0O+p8nMiKqGUuCClwykNNnuNp7oA51adPO9mUvqFWfEfTKSApN1I0s +zt9ZqeHqJ+82XLqDakVLWD+t6QtNK4M5mvsjKtiG8OgxdOejslDPQBnd0ilp+oQE +0CUCggEATL5jg3Kcbq9HcaFEdgqzVXXcqNI4hOVWjP3Vp6W390PUZkmls1Igms3l +zc9fhibHv5gRq/Y3xQGUuP0dKuPuTykjXUTRozeal5ErkiLU6PZeVlz7xUHbLrF9 
+J/IARiZtnfhcDh8qo33QZ4m4HUB5b54/4Gdzr7go8o0UVI50gXY65kwz5hV79/7P +S38hxS+X+k2FkWuCNrP6ECkC/L8jNM1h2gR4Ez6FziiNxYbOhQgsaTQRKjca/v/E +tzX9oZjMqiWtWKcxgSCZs/1DscXjjCsi64f/oOBShF1x4ZeiMoZTMhQBLMlbVx/n +JxRWkMCYLEbAZXWJXEKHyqaiIAOpkw== -----END PRIVATE KEY----- diff --git a/testdata/x509/spiffe_cert.pem b/testdata/x509/spiffe_cert.pem index 0ce0a1aa190c..bbe7d66a4bfb 100644 --- a/testdata/x509/spiffe_cert.pem +++ b/testdata/x509/spiffe_cert.pem @@ -1,33 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFsjCCA5qgAwIBAgIUPxiyjwxoDyMeRvl4g9TSdvLlCA0wDQYJKoZIhvcNAQEL +MIIFsjCCA5qgAwIBAgIURygVMMzdr+Q7rsUaz189JozyHMwwDQYJKoZIhvcNAQEL BQAwTjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQwwCgYDVQQHDANTVkwxDTAL -BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTAeFw0yMDEwMDYxNzI2 -MzFaFw0zMDEwMDQxNzI2MzFaME4xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEM +BgNVBAoMBGdSUEMxFTATBgNVBAMMDHRlc3QtY2xpZW50MTAeFw0yMTEyMjMxODQy +NTJaFw0zMTEyMjExODQyNTJaME4xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEM MAoGA1UEBwwDU1ZMMQ0wCwYDVQQKDARnUlBDMRUwEwYDVQQDDAx0ZXN0LWNsaWVu -dDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCV84YR/EV55qfFynHh -QvWEZW5hUI9q0DeD5kG5CarrkOj11rZuQIBZ7X23CJbeoVbrvbYLghsPYJzxS/n3 -Qlwwzb5k+L0Qt+HrBD836HcSK5k1oh0jGGMaGownap+XCZH9g52s/8iiwfI02CmN -TbwsNp7wtSEFgNOd2OlzhT6wBLF2Q6uxfBmsDpiChxe2Fs1lyan9RH8fYEf7sxwP -E+SgBfEs7dSG5ZwFfdF+pd1T3IfrVjIxechKO1MO7HTSxbOTj6eHf1NeErDTGPA7 -VrnDCupRgcDGyAhFd54r62R8TbTjn5MwzMxElO45Ck/Ej7Qw/GWeaBHj/dMa6mhE -R55PvnKuyj+k9t0Rf0HDZyONtY5/OLqI/xVr27Y1o9v5FysNgjWPkZMRpvuCzkeC -2RuE6k2TfBDRLiCyYu/Zzw+ZtUyTAKtWtefLdQBjrYpnhrDPpmrnTWomX/e9pylE -WfkyxCswiPnDw7ypI7uFSTkz0+bUaROmAtlPvR+3SjaQDWigwz3eJsdIaeg5AY9q -//rWaal6l2iR0Ou9L6A9lLxh5iN/ch+OGk4QPK6pFbOy3IqYfmQ+IpAXG0da9RT2 -EN76cNa3bldEjRRON8oQ3HZmhOQJqVxhQciUz84sTjAqH8WvqqbdG9HKUoZ19T5Z -9vNldjlQn33Mi5gBxdugqdnmCQIDAQABo4GHMIGEMB0GA1UdDgQWBBT8rr0kPapk -bGLJ4EU1582sw7WlOTAfBgNVHSMEGDAWgBT8rr0kPapkbGLJ4EU1582sw7WlOTAP +dDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ4AqpGetyVSqGUuBJ 
+LVFla+7bEfca7UYzfVSSZLZ/X+JDmWIVN8UIPuFib5jhMEc3XaUnFXUmM7zEtz/Z +G5hapwLwOb2C3ZxOP6PQjYCJxbkLie+b43UQrFu1xxd3vMhVJgcj/AIxEpmszuqO +a6kUrkYifjJADQ+64kZgl66bsTdXMCzpxyFl9xUfff59L8OX+HUfAcoZz3emjg3Z +JPYURQEmjdZTOau1EjFilwHgd989Jt7NKgx30NXoHmw7nusVBIY94fL2VKN3f1XV +m0dHu5NI279Q6zr0ZBU7k5T3IeHnzsUesQS4NGlklDWoVTKk73Uv9Pna8yQsSW75 +7PEbHOGp9Knu4bnoGPOlsG81yIPipO6hTgGFK24pF97M9kpGbWqYX4+2vLlrCAfc +msHqaUPmQlYeRVTT6vw7ctYo2kyUYGtnODXk76LqewRBVvkzx75QUhfjAyb740Yc +DmIenc56Tq6gebJHjhEmVSehR6xIpXP7SVeurTyhPsEQnpJHtgs4dcwWOZp7BvPN +zHXmJqfr7vsshie3vS5kQ0u1e1yqAqXgyDjqKXOkx+dpgUTehSJHhPNHvTc5LXRs +vvXKYz6FrwR/DZ8t7BNEvPeLjFgxpH7QVJFLCvCbXs5K6yYbsnLfxFIBPRnrbJkI +sK+sQwnRdnsiUdPsTkG5B2lQfQIDAQABo4GHMIGEMB0GA1UdDgQWBBQ2lBp0PiRH +HvQ5IRURm8aHsj4RETAfBgNVHSMEGDAWgBQ2lBp0PiRHHvQ5IRURm8aHsj4RETAP BgNVHRMBAf8EBTADAQH/MDEGA1UdEQQqMCiGJnNwaWZmZTovL2Zvby5iYXIuY29t -L2NsaWVudC93b3JrbG9hZC8xMA0GCSqGSIb3DQEBCwUAA4ICAQA15Ne+Lz5cN1/B -fkys4QHDWJ0n5Zy9OtwSW6aTyqIIwls6OOSkJn3qJMoT2oFvoHoOxb0swyN+zUoD -pmPEd7FHkMm8BhRqoyH3UZGR7kOSIIcfvldVZbW9mD88A04qvLsWkkanMyGhkYV4 -0TXyb8USdjeNm1H32iF4k24czSpvoOYo9HOQv+4aFcqTMnGwS7CvwU6O6vVU8gIy -HYP/oWnkhap6X7acjPxYoW5IDZdN9vdMz9wQlKlc799lWqOCuwl68NSuTNcNNFyn -TXfFWZaghb7iXsUezGYTY9glsPxY0Egmbcmxut0gz0U2BNVvNGKUUu55MlAS7yXO -Y7eTfSSf6DJesFQKwTg8qlyNLjzbLSmhvz6EPV55ToUxPPA9CIOrWQwXv4GdySuH -bwof3U5p/cq2NDtxv8KGisjK04l++s+Ea8AS6T6O8+08nBFGgfNW331eWtU91JoQ -e6Q4DWipiNzkIvISk48V8CT9eRB2KD7NsigQprePRN3gDZREh+01gwbVUX2gbtHx -1RGxEjO6H0kUuaoXF5E6+WGwgn8MA47qUy1WXC5QDFpc5LyaoVaMFv8bcoWSNXAS -Oes+ZDWDXWq6F+9Kt0zWmO651cVquLTjmgt48fgL6m8rU13ikjH7dFnimrwRxfOD -p+z97N7TvWfgE1HOmYDfsbaHjPFZKg== +L2NsaWVudC93b3JrbG9hZC8xMA0GCSqGSIb3DQEBCwUAA4ICAQA1mSkgRclAl+E/ +aS9zJ7t8+Y4n3T24nOKKveSIjxXm/zjhWqVsLYBI6kglWtih2+PELvU8JdPqNZK3 +4Kl0Q6FWpVSGDdWN1i6NyORt2ocggL3ke3iXxRk3UpUKJmqwz81VhA2KUHnMlyE0 +IufFfZNwNWWHBv13uJfRbjeQpKPhU+yf4DeXrsWcvrZlGvAET+mcplafUzCp7Iv+ +PcISJtUerbxbVtuHVeZCLlgDXWkLAWJN8rf0dIG4x060LJ+j6j9uRVhb9sZn1HJV 
++j4XdIYm1VKilluhOtNwP2d3Ox/JuTBxf7hFHXZPfMagQE5k5PzmxRaCAEMJ1l2D +vUbZw+shJfSNoWcBo2qadnUaWT3BmmJRBDh7ZReib/RQ1Rd4ygOyzP3E0vkV4/gq +yjLdApXh5PZP8KLQZ+1JN/sdWt7VfIt9wYOpkIqujdll51ESHzwQeAK9WVCB4UvV +z6zdhItB9CRbXPreWC+wCB1xDovIzFKOVsLs5+Gqs1m7VinG2LxbDqaKyo/FB0Hx +x0acBNzezLWoDwXYQrN0T0S4pnqhKD1CYPpdArBkNezUYAjS725FkApuK+mnBX3U +0msBffEaUEOkcyar1EW2m/33vpetD/k3eQQkmvQf4Hbiu9AF+9cNDm/hMuXEw5EX +GA91fn0891b5eEW8BJHXX0jri0aN8g== -----END CERTIFICATE----- diff --git a/testdata/x509/spiffe_key.pem b/testdata/x509/spiffe_key.pem index 5462d66326b1..77a33e9d0af0 100644 --- a/testdata/x509/spiffe_key.pem +++ b/testdata/x509/spiffe_key.pem @@ -1,52 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCV84YR/EV55qfF -ynHhQvWEZW5hUI9q0DeD5kG5CarrkOj11rZuQIBZ7X23CJbeoVbrvbYLghsPYJzx -S/n3Qlwwzb5k+L0Qt+HrBD836HcSK5k1oh0jGGMaGownap+XCZH9g52s/8iiwfI0 -2CmNTbwsNp7wtSEFgNOd2OlzhT6wBLF2Q6uxfBmsDpiChxe2Fs1lyan9RH8fYEf7 -sxwPE+SgBfEs7dSG5ZwFfdF+pd1T3IfrVjIxechKO1MO7HTSxbOTj6eHf1NeErDT -GPA7VrnDCupRgcDGyAhFd54r62R8TbTjn5MwzMxElO45Ck/Ej7Qw/GWeaBHj/dMa -6mhER55PvnKuyj+k9t0Rf0HDZyONtY5/OLqI/xVr27Y1o9v5FysNgjWPkZMRpvuC -zkeC2RuE6k2TfBDRLiCyYu/Zzw+ZtUyTAKtWtefLdQBjrYpnhrDPpmrnTWomX/e9 -pylEWfkyxCswiPnDw7ypI7uFSTkz0+bUaROmAtlPvR+3SjaQDWigwz3eJsdIaeg5 -AY9q//rWaal6l2iR0Ou9L6A9lLxh5iN/ch+OGk4QPK6pFbOy3IqYfmQ+IpAXG0da -9RT2EN76cNa3bldEjRRON8oQ3HZmhOQJqVxhQciUz84sTjAqH8WvqqbdG9HKUoZ1 -9T5Z9vNldjlQn33Mi5gBxdugqdnmCQIDAQABAoICADWoJXJsHgRHyAMbtPJRPn94 -uC20YQ1somDdVOk8j1+pw+KsSS1cgVEsjU6gkTPq8ap7gRfPH5W6EY66jCCxK0H/ -bUC+TREda4boRyLfWTQ0S6eIcfqr8FJX64zzN1YZg5b+sL5F7Opokh3ct8mrZkk/ -5lHlzoIknhSemLLQnCTqGQJjpp1k9d6+fk4+vvpWYHsq1VweVYrJrhhf+AthJ+8n -ESztkZ4PrWu9oOg7u94VTMGmX2Ga3VPKtKbjb844FlEYF2+B3TgNYh63jsb8+o3T -axNtZaj7zRHmgr/ehF+CgtbstAPDVNi5niDlErQYY/cfadFsFfLKUe8Qr+y23+vG -1WuVSUmrUcgO/IYMIz2gEOrBOutc9cdKOlCnwrXu3WjSGO6zhcbXCw7WZrSR/Uj5 -1Tatt5QJ5Z3i4vOc6Jj1XKL/9Xa+FEryfVh/HKlQTlHnIuuGXMBpIzyYQ6kY8+cH 
-n75FVMo4lB97c48hweupQY6SUQwvWXqXQOAxLJ/eq2k1QpUWJ4GV5kRr3/eQ/AZ1 -y4Kk2ZxM8IWksFdVnomNr65GIk219D1uwDtJQeBrwqrYseGq/2mB2h4llTbwjSez -GkOPO74tLPh3wkG8wDzbc94nfouxCL6ee9W4XeDGzYXgndSKAPOWWUyFnsIxisVu -BB2HUkJZotG2Otrgnj/xAoIBAQDF7NjT9JkN+JhrmH4jG+16lI6RDErf/VgheSE8 -G/ayAg1RGY2FsuyTi2bAM3xprXqaZHDSikil8t9G7JJZoKVzesCZR+OJU+9fGZR6 -TCS6mCdv6OEG18GJP9dzDLaqYybJ9VgnSnXT7mRlcCyU8uZ9/2FR/vnLEatbOw99 -2tles0LGdkKT/YIYdxIENIpmaZhzAOWPDLDwDTO8aIXy40DiGzjLWJf/0LgCV7Ub -2C8aPS4WWXCOdAeYvjcEKeCp9+YSgZaYNT2P8Ns40VcL/yysMtFzvnTOcA9riyzN -5pu+ppv/KYGt/ENa4zQMCgKFtTUxicr2M9VYqNI/CgJmJPKVAoIBAQDB8x13soMv -tohfiNwGTjEkVzu/RkTCix8+hERF1C+oupL3ykpJOGvvpOyhFUqwFAYyBQKflrxj -9lQBKQiPYR0VtgDFJ7UzjYGO8zt8U56gTcYeatpNKY4zvZyGAOBhtWuvuvi8pnpc -xO8yQjE0jWrwWL3bmf/5lP2jO8j+k1qZfrA2ksTGUWRGEZkFRqqTQxvRrJVr9QiY -2xpRq/n7fq9UhCfNxm2aLdYgZ/BCVFzNahCEWfFdH3jOcP5N/5Fkiy7IhwfdglO+ -JjydEMqBYHg3ET6MQ3JYM/Gt/GkX0myV9BHd/fYYF2xGBMDIMLVdFKyCivUMGGt1 -pHhcNLzebFylAoIBAQDA4Cr4eh8Az2XxTDx3iEqnLsezr8/zcVYF4J2zjuib1YYW -pxkT1iXXLnymBkZSUVztwb10Xo+nMAPHgNipgPRakZ/If5bLh8D34tyfRT5xm76q -vr0zRuPyFQWmtxf2+QKewnjyaQxjx6eMdoDrcb2NwWWcWyYfbwuWrvpMwg0bzQLg -lfQRdXTm1Hn5IR5R6MtIHvKVsV9nvuXQz+bgp/bdoHt7Jc2R3FrE5aW3Cbf1EPOt -keEu4QFaJttEMm8eE1bgZ+pST2e7spJfTxlNtpBZCni0G0CGwAs22PyDdhwF8SSJ -xm/6FZ+pnUlmBgcpN0osCUSBIkfgyzt/dQibc5v1AoIBAF5ZLweQfoLSb+rRf/9N -QFimWvlEbKSa2vslirTRcNHK2T3TWWnfGZq9hyMhYXDgfNcOWuVZhZG3PcxGstRU -8LokDKHcHCjU+KaaqmBjqTHgQ7V+U23f/j4rSh5iBMVjZNxavy++aJ4Caz3ut1MS -TGhZMxrGAqDeGriyl6dH9XXgDEawBStYYsg3PVI0uzviFIFeTF31GFaLl3UNjREL -4qzhkR9oHN841wZyqY0Kzw5aP2iy/FhJvBHpI7y7y3W2w25nSas3ABfrL+dUSL7B -OBnJuLyw/snrkvEJbfJZudsEnUB5j6LOmixBmaqJD2EVcooaoPReWMAk3ywzt4EY -A8UCggEBAJbArCsQ0Q+pFOEce6JlgtYAdcBiu8n5zFYLD2/qZdM2ir/09uj9XDpC -WbE5YTumgzkt2VLK/wf+HATMAyXObtaAn5AdoA6OJ1AvbNGOwA3F2LOlbJGO2XOW -TpQlgbDvBBktaKk9PSszDj92W2tdFQPefDP/uBzonymev7BWCERZ/vU2L3HpXQjp -bxzRyVNWwwg3VBYvbCIz8v2yeviAsiEkOCPRU6+cIr7/VUr0mymzDgfzKaQNoOap -LqOpnInw0pUA+BhsVr4n/fBXZLbSd7ZG5WU48HUaSLefEI4NuiZLT2K1tZreyBqZ 
-Xgln2zbN6APAb+dGDdv27dz4YlasU4s= +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDJ4AqpGetyVSqG +UuBJLVFla+7bEfca7UYzfVSSZLZ/X+JDmWIVN8UIPuFib5jhMEc3XaUnFXUmM7zE +tz/ZG5hapwLwOb2C3ZxOP6PQjYCJxbkLie+b43UQrFu1xxd3vMhVJgcj/AIxEpms +zuqOa6kUrkYifjJADQ+64kZgl66bsTdXMCzpxyFl9xUfff59L8OX+HUfAcoZz3em +jg3ZJPYURQEmjdZTOau1EjFilwHgd989Jt7NKgx30NXoHmw7nusVBIY94fL2VKN3 +f1XVm0dHu5NI279Q6zr0ZBU7k5T3IeHnzsUesQS4NGlklDWoVTKk73Uv9Pna8yQs +SW757PEbHOGp9Knu4bnoGPOlsG81yIPipO6hTgGFK24pF97M9kpGbWqYX4+2vLlr +CAfcmsHqaUPmQlYeRVTT6vw7ctYo2kyUYGtnODXk76LqewRBVvkzx75QUhfjAyb7 +40YcDmIenc56Tq6gebJHjhEmVSehR6xIpXP7SVeurTyhPsEQnpJHtgs4dcwWOZp7 +BvPNzHXmJqfr7vsshie3vS5kQ0u1e1yqAqXgyDjqKXOkx+dpgUTehSJHhPNHvTc5 +LXRsvvXKYz6FrwR/DZ8t7BNEvPeLjFgxpH7QVJFLCvCbXs5K6yYbsnLfxFIBPRnr +bJkIsK+sQwnRdnsiUdPsTkG5B2lQfQIDAQABAoICAQCXbolwqfHVFQ/OLRLzsZvy +UZGeIY7UUxKrAyPSoNvJFpr7DG7n7arOcaTOG1p56aYyYPvHIrB7FKpQggnSCYIy +1j89BoMjTKu4gsKWad72+ivB/RmRPYGOHUy6QftXpXQ9c0Y99weJ2iMO3zRR2269 +BbG0pCd7ppCbJquWP5IKVlhl/cxjHS3vd/YPZorlS1QUhpsMxGHfFKLzfHHk5nX1 +ZIHlctZIHeWw8VG8W/xbbnA2RhcxnY42vqAG+/NCkgZUAM3WU8zWfU0WEZ3Imy9Q +HuPv7m9H+vyBYSYQR7eh3nfAVHnHeRBKQX6hpQ/PEwHneXzVmZVnaaZD1l35+oQs +9mCmIC5PkwGe+vJI+Rxt5UElgzRDMVF6YuHfmobQQn5mT0Jsbdn6tfaFXa+e8+Ja +4WSlv/rVNSvCxNSwV0fOGIIrV1CIElf9ei7Go/Jewz94Eh2PdoOL9gNPqBeFFLmt +mS7HgST/Dkn8yAvyYgY5IDDWDiauECMV2F37QRk9stX+dUFhDQU3P/CcMH0SRyjx +vXRRvWY/5rWuinJcIr1Kb0OHiDztrM/4wAYc1BQjsL6CPKT3aAmCs/fxf0JflK/o +pvzsK5+AyBrvEBh9SXPcQYYEj6ZYCGZ/rPVlmgOjT/d4+xZf0FBUi3g+np7V15Km +ao4LgfcSQF5xUVTiyGo1AQKCAQEA423AcschurSrHwnmL1E5UIaYoFyKok2W3GZ6 +nu8Gp49GlF1v+CQ4w0zMNgigTTHxPyShK4/lC1n8W7liuo2TD98Km4qsnkD04R1z +Jf2PNBoVfMFEwxTA0t3LYfqJc3xLxWTR7wbvIRwtrcM/tFbU0RQAZ0/eE5Vine6t +fU8HBn/kSnUeCXMjFqheRccluYQgwES5ayJjnvf4Yrwe+In89+C6JE01LetqtxiL +U9X6iO9VpF9JVI7CPTBd7cZI3jfO+N2dXGlnt/Dp95S3RymoRSyMA1ILCCFZOeTo +Kh26/6hfgc2ox6ttN+s/1J9xPaXFmvbEAT27n+bDFVIfvF9QwQKCAQEA4zx4xLsW +AdljDLSDcLziWE1ikvO25hWiH3Q7ZyFi7vqby4d7FdgazNERrFZaCNQeSLfMakLd 
+zO1URfWsG7+6XvY45VtgVlqw2+uAHlE/B3FtrSeaIDr68AvWfPo02vckRMgTCBch +MEvul58A658mlybOkTHoRyaeDDD+83sIHAnyFzublxfsfbdgqzFW1RxfiUwmAM7w +9rI+PFPQnBgBkfyjcOfaVx/I1uvm3Nnl4ZUAmEdz6YlJdN6EhM8SjCP2LsLvhDUz +kjZ5WJ50ybRs3DWhDH/d06DmFlGwgu894TKBHqrq2fgDDukpZkH7cHldcjugzlHJ +c2CAO9MxI4UyvQKCAQAogEgQaKP6EuiSe3nRnV5el8mgbTqHEtg14c4edaSyvFIu +Y8Fn6FNvfEK1sK2TcbxrqUNGdbatYdYOI6KQZFv3LJo//t8kw56YZF04O8J/3dFL +yUNMlmqMYtEwXqSRu2Xm/kBgl9SICfOciTPUEs6NeUllHJUI2caZJ4Mf2K4Am0/1 +bovt1OI/y7YWKRPvyLboZpS6noItMi26r5O4YSJ6pjuf8VvyFIWJm8ZcJLQcJLsU +rZ9qfo3axb1EddZONJQYP6chaOf+mtmfrI1DEAkWYIuCn961EPNJ2xj5PxgpJTv0 +6sIO5NlrZuqUG9zXxKi/Iwjey7aZEEhXiKt8KWFBAoIBABtcRaJScHTqit2VwpnJ +dGtzbeIJzETp5+pnoVtqjrH9pNKdznkz2w48QieBAjg76iWRU+Cbin9JODNwQDfb +HwKeHP2owfHD27WvJm8AE1m/E5icwxcMYviSRFIqAkE3LrvFZ107A7j/+4twDrlQ +IWJjvs2Gt9QRV0hagegpMTHHFMotWC+aJtSARvh16WGhl/M9IvpH8IWTsqCq6txQ +m6fLRpaqpASHhDQ0lUiUR/Sgb0DmoZNF/3096bDgCfirv9GjkRlXGo2JV5UPBzre +KZleL7UElF4N6oZXcaxiSA4ceaWKqNpz3VJnSp/QZAkH4/OEMHmHKX1l6irJ5AnF +2PUCggEAQzXltQN+24eNh9nD9BzJR7CYOJQ4CzmHbkxJJZXGJtSCv0mYcEsSRmpH +5mW9hr8w6Y5WL4XfMduUX9od8exJtXl5d7Fl8oUNh+CwDr9smEjEDchh+6d11ZCi +Ervr1XOmNyOtpnQ1+N2nbbBEMLVns9yX6oNnl9mBdwpvwko7Q9ahyTvWnE2oG5At +8VeX/34k6BWdqPCJfnISMVbt6D+J0kaqaVw6BplTNSRs93bmqKwpcrRHMTida+bO +l9t8Cy3TguZdRWTJZ0kFmze0fV6dXYookZoUIeisTZBl701tOsvysZjxtMQJfTLJ +Io+0lEzXxTbCbBP/iyizjo62XTgpdQ== -----END PRIVATE KEY----- diff --git a/version.go b/version.go index c149b22ad8a1..353cfd52862c 100644 --- a/version.go +++ b/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.37.0-dev" +const Version = "1.57.0" diff --git a/vet.sh b/vet.sh index dcd939bb3907..a8e4732b3d20 100755 --- a/vet.sh +++ b/vet.sh @@ -32,37 +32,17 @@ PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" go version if [[ "$1" = "-install" ]]; then - # Check for module support - if go help mod >& /dev/null; then - # Install the pinned versions as defined in module tools. 
- pushd ./test/tools - go install \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - else - # Ye olde `go get` incantation. - # Note: this gets the latest version of all tools (vs. the pinned versions - # with Go modules). - go get -u \ - golang.org/x/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - fi + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 + if [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -78,8 +58,20 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + make proto && git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + # - Ensure all source files contain a copyright message. 
-not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go @@ -93,7 +85,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" @@ -101,38 +93,32 @@ not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' -# - Check imports that are illegal in appengine (until Go 1.11). -# TODO: Remove when we drop Go 1.10 support -go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go - -# - gofmt, goimports, golint (with exceptions for generated code), go vet. -gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | not grep -vE "\.pb\.go" -golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" -go vet -all ./... - misspell -error . -# - Check that generated proto files are up to date. 
-if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -# - Check that our modules are tidy. -if go help mod >& /dev/null; then - find . -name 'go.mod' | xargs -IXXX bash -c 'cd $(dirname XXX); go mod tidy' +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do + MOD_DIR=$(dirname ${MOD_FILE}) + pushd ${MOD_DIR} + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" + + go mod tidy -compat=1.17 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) -fi + popd +done # - Collection of static analysis checks # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. @@ -159,7 +145,6 @@ grpc.NewGZIPDecompressor grpc.RPCCompressor grpc.RPCDecompressor grpc.ServiceConfig -grpc.WithBalancerName grpc.WithCompressor grpc.WithDecompressor grpc.WithDialer diff --git a/xds/bootstrap/bootstrap.go b/xds/bootstrap/bootstrap.go new file mode 100644 index 000000000000..fcb99bdfd967 --- /dev/null +++ b/xds/bootstrap/bootstrap.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package bootstrap provides the functionality to register possible options +// for aspects of the xDS client through the bootstrap file. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed +// in a later release. +package bootstrap + +import ( + "encoding/json" + + "google.golang.org/grpc/credentials" +) + +// registry is a map from credential type name to Credential builder. +var registry = make(map[string]Credentials) + +// Credentials interface encapsulates a credentials.Bundle builder +// that can be used for communicating with the xDS Management server. +type Credentials interface { + // Build returns a credential bundle associated with this credential. + Build(config json.RawMessage) (credentials.Bundle, error) + // Name returns the credential name associated with this credential. + Name() string +} + +// RegisterCredentials registers Credentials used for connecting to the xds +// management server. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple credentials are +// registered with the same name, the one registered last will take effect. +func RegisterCredentials(c Credentials) { + registry[c.Name()] = c +} + +// GetCredentials returns the credentials associated with a given name. 
+// If no credentials are registered with the name, nil will be returned. +func GetCredentials(name string) Credentials { + if c, ok := registry[name]; ok { + return c + } + + return nil +} diff --git a/xds/bootstrap/bootstrap_test.go b/xds/bootstrap/bootstrap_test.go new file mode 100644 index 000000000000..80ae31ccd2e3 --- /dev/null +++ b/xds/bootstrap/bootstrap_test.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package bootstrap + +import ( + "encoding/json" + "testing" + + "google.golang.org/grpc/credentials" +) + +const testCredsBuilderName = "test_creds" + +var builder = &testCredsBuilder{} + +func init() { + RegisterCredentials(builder) +} + +type testCredsBuilder struct { + config json.RawMessage +} + +func (t *testCredsBuilder) Build(config json.RawMessage) (credentials.Bundle, error) { + t.config = config + return nil, nil +} + +func (t *testCredsBuilder) Name() string { + return testCredsBuilderName +} + +func TestRegisterNew(t *testing.T) { + c := GetCredentials(testCredsBuilderName) + if c == nil { + t.Fatalf("GetCredentials(%q) credential = nil", testCredsBuilderName) + } + + const sampleConfig = "sample_config" + rawMessage := json.RawMessage(sampleConfig) + if _, err := c.Build(rawMessage); err != nil { + t.Errorf("Build(%v) error = %v, want nil", rawMessage, err) + } + + if got, want := string(builder.config), sampleConfig; got != want { + t.Errorf("Build config = %v, 
want %v", got, want) + } +} diff --git a/xds/csds/csds.go b/xds/csds/csds.go index 73b92e9443ce..8d03124811a4 100644 --- a/xds/csds/csds.go +++ b/xds/csds/csds.go @@ -27,60 +27,54 @@ import ( "context" "fmt" "io" - "time" + "sync" - v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" - v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/timestamppb" - _ "google.golang.org/grpc/xds/internal/client/v2" // Register v2 xds_client. - _ "google.golang.org/grpc/xds/internal/client/v3" // Register v3 xds_client. + v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" + v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) -// xdsClientInterface contains methods from xdsClient.Client which are used by -// the server. This is useful for overriding in unit tests. 
-type xdsClientInterface interface { - DumpLDS() (string, map[string]client.UpdateWithMD) - DumpRDS() (string, map[string]client.UpdateWithMD) - DumpCDS() (string, map[string]client.UpdateWithMD) - DumpEDS() (string, map[string]client.UpdateWithMD) - BootstrapConfig() *bootstrap.Config - Close() -} +var logger = grpclog.Component("xds") -var ( - logger = grpclog.Component("xds") - newXDSClient = func() (xdsClientInterface, error) { - return client.New() - } -) +const prefix = "[csds-server %p] " + +func prefixLogger(s *ClientStatusDiscoveryServer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, s)) +} -// ClientStatusDiscoveryServer implementations interface ClientStatusDiscoveryServiceServer. +// ClientStatusDiscoveryServer provides an implementation of the Client Status +// Discovery Service (CSDS) for exposing the xDS config of a given client. See +// https://github.com/envoyproxy/envoy/blob/main/api/envoy/service/status/v3/csds.proto. +// +// For more details about the gRPC implementation of CSDS, refer to gRPC A40 at: +// https://github.com/grpc/proposal/blob/master/A40-csds-support.md. type ClientStatusDiscoveryServer struct { - // xdsClient will always be the same in practise. But we keep a copy in each - // server instance for testing. - xdsClient xdsClientInterface + logger *internalgrpclog.PrefixLogger + + mu sync.Mutex + xdsClient xdsclient.XDSClient + xdsClientClose func() } -// NewClientStatusDiscoveryServer returns an implementation of the CSDS server that can be -// registered on a gRPC server. +// NewClientStatusDiscoveryServer returns an implementation of the CSDS server +// that can be registered on a gRPC server. 
func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) { - xdsC, err := newXDSClient() + c, close, err := xdsclient.New() if err != nil { - return nil, fmt.Errorf("failed to create xds client: %v", err) + logger.Warningf("Failed to create xDS client: %v", err) } - return &ClientStatusDiscoveryServer{ - xdsClient: xdsC, - }, nil + s := &ClientStatusDiscoveryServer{xdsClient: c, xdsClientClose: close} + s.logger = prefixLogger(s) + s.logger.Infof("Created CSDS server, with xdsClient %p", c) + return s, nil } // StreamClientStatus implementations interface ClientStatusDiscoveryServiceServer. @@ -109,26 +103,28 @@ func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req * } // buildClientStatusRespForReq fetches the status from the client, and returns -// the response to be sent back to client. +// the response to be sent back to xdsclient. // // If it returns an error, the error is a status error. func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.xdsClient == nil { + return &v3statuspb.ClientStatusResponse{}, nil + } // Field NodeMatchers is unsupported, by design // https://github.com/grpc/proposal/blob/master/A40-csds-support.md#detail-node-matching. 
if len(req.NodeMatchers) != 0 { return nil, status.Errorf(codes.InvalidArgument, "node_matchers are not supported, request contains node_matchers: %v", req.NodeMatchers) } + dump := s.xdsClient.DumpResources() ret := &v3statuspb.ClientStatusResponse{ Config: []*v3statuspb.ClientConfig{ { - Node: nodeProtoToV3(s.xdsClient.BootstrapConfig().NodeProto), - XdsConfig: []*v3statuspb.PerXdsConfig{ - s.buildLDSPerXDSConfig(), - s.buildRDSPerXDSConfig(), - s.buildCDSPerXDSConfig(), - s.buildEDSPerXDSConfig(), - }, + Node: s.xdsClient.BootstrapConfig().NodeProto, + GenericXdsConfigs: dumpToGenericXdsConfig(dump), }, }, } @@ -137,176 +133,47 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp // Close cleans up the resources. func (s *ClientStatusDiscoveryServer) Close() { - s.xdsClient.Close() -} - -// nodeProtoToV3 converts the given proto into a v3.Node. n is from bootstrap -// config, it can be either v2.Node or v3.Node. -// -// If n is already a v3.Node, return it. -// If n is v2.Node, marshal and unmarshal it to v3. -// Otherwise, return nil. -// -// The default case (not v2 or v3) is nil, instead of error, because the -// resources in the response are more important than the node. The worst case is -// that the user will receive no Node info, but will still get resources. 
-func nodeProtoToV3(n proto.Message) *v3corepb.Node { - var node *v3corepb.Node - switch nn := n.(type) { - case *v3corepb.Node: - node = nn - case *v2corepb.Node: - v2, err := proto.Marshal(nn) - if err != nil { - logger.Warningf("Failed to marshal node (%v): %v", n, err) - break - } - node = new(v3corepb.Node) - if err := proto.Unmarshal(v2, node); err != nil { - logger.Warningf("Failed to unmarshal node (%v): %v", v2, err) - } - default: - logger.Warningf("node from bootstrap is %#v, only v2.Node and v3.Node are supported", nn) - } - return node -} - -func (s *ClientStatusDiscoveryServer) buildLDSPerXDSConfig() *v3statuspb.PerXdsConfig { - version, dump := s.xdsClient.DumpLDS() - var resources []*v3adminpb.ListenersConfigDump_DynamicListener - for name, d := range dump { - configDump := &v3adminpb.ListenersConfigDump_DynamicListener{ - Name: name, - ClientStatus: serviceStatusToProto(d.MD.Status), - } - if (d.MD.Timestamp != time.Time{}) { - configDump.ActiveState = &v3adminpb.ListenersConfigDump_DynamicListenerState{ - VersionInfo: d.MD.Version, - Listener: d.Raw, - LastUpdated: timestamppb.New(d.MD.Timestamp), - } - } - if errState := d.MD.ErrState; errState != nil { - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - LastUpdateAttempt: timestamppb.New(errState.Timestamp), - Details: errState.Err.Error(), - VersionInfo: errState.Version, - } - } - resources = append(resources, configDump) - } - return &v3statuspb.PerXdsConfig{ - PerXdsConfig: &v3statuspb.PerXdsConfig_ListenerConfig{ - ListenerConfig: &v3adminpb.ListenersConfigDump{ - VersionInfo: version, - DynamicListeners: resources, - }, - }, + if s.xdsClientClose != nil { + s.xdsClientClose() } } -func (s *ClientStatusDiscoveryServer) buildRDSPerXDSConfig() *v3statuspb.PerXdsConfig { - _, dump := s.xdsClient.DumpRDS() - var resources []*v3adminpb.RoutesConfigDump_DynamicRouteConfig - for _, d := range dump { - configDump := &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ - VersionInfo: d.MD.Version, 
- ClientStatus: serviceStatusToProto(d.MD.Status), - } - if (d.MD.Timestamp != time.Time{}) { - configDump.RouteConfig = d.Raw - configDump.LastUpdated = timestamppb.New(d.MD.Timestamp) - } - if errState := d.MD.ErrState; errState != nil { - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - LastUpdateAttempt: timestamppb.New(errState.Timestamp), - Details: errState.Err.Error(), - VersionInfo: errState.Version, +func dumpToGenericXdsConfig(dump map[string]map[string]xdsresource.UpdateWithMD) []*v3statuspb.ClientConfig_GenericXdsConfig { + var ret []*v3statuspb.ClientConfig_GenericXdsConfig + for typeURL, updates := range dump { + for name, update := range updates { + config := &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: typeURL, + Name: name, + VersionInfo: update.MD.Version, + XdsConfig: update.Raw, + LastUpdated: timestamppb.New(update.MD.Timestamp), + ClientStatus: serviceStatusToProto(update.MD.Status), } - } - resources = append(resources, configDump) - } - return &v3statuspb.PerXdsConfig{ - PerXdsConfig: &v3statuspb.PerXdsConfig_RouteConfig{ - RouteConfig: &v3adminpb.RoutesConfigDump{ - DynamicRouteConfigs: resources, - }, - }, - } -} - -func (s *ClientStatusDiscoveryServer) buildCDSPerXDSConfig() *v3statuspb.PerXdsConfig { - version, dump := s.xdsClient.DumpCDS() - var resources []*v3adminpb.ClustersConfigDump_DynamicCluster - for _, d := range dump { - configDump := &v3adminpb.ClustersConfigDump_DynamicCluster{ - VersionInfo: d.MD.Version, - ClientStatus: serviceStatusToProto(d.MD.Status), - } - if (d.MD.Timestamp != time.Time{}) { - configDump.Cluster = d.Raw - configDump.LastUpdated = timestamppb.New(d.MD.Timestamp) - } - if errState := d.MD.ErrState; errState != nil { - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - LastUpdateAttempt: timestamppb.New(errState.Timestamp), - Details: errState.Err.Error(), - VersionInfo: errState.Version, - } - } - resources = append(resources, configDump) - } - return &v3statuspb.PerXdsConfig{ - 
PerXdsConfig: &v3statuspb.PerXdsConfig_ClusterConfig{ - ClusterConfig: &v3adminpb.ClustersConfigDump{ - VersionInfo: version, - DynamicActiveClusters: resources, - }, - }, - } -} - -func (s *ClientStatusDiscoveryServer) buildEDSPerXDSConfig() *v3statuspb.PerXdsConfig { - _, dump := s.xdsClient.DumpEDS() - var resources []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig - for _, d := range dump { - configDump := &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ - VersionInfo: d.MD.Version, - ClientStatus: serviceStatusToProto(d.MD.Status), - } - if (d.MD.Timestamp != time.Time{}) { - configDump.EndpointConfig = d.Raw - configDump.LastUpdated = timestamppb.New(d.MD.Timestamp) - } - if errState := d.MD.ErrState; errState != nil { - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - LastUpdateAttempt: timestamppb.New(errState.Timestamp), - Details: errState.Err.Error(), - VersionInfo: errState.Version, + if errState := update.MD.ErrState; errState != nil { + config.ErrorState = &v3adminpb.UpdateFailureState{ + LastUpdateAttempt: timestamppb.New(errState.Timestamp), + Details: errState.Err.Error(), + VersionInfo: errState.Version, + } } + ret = append(ret, config) } - resources = append(resources, configDump) - } - return &v3statuspb.PerXdsConfig{ - PerXdsConfig: &v3statuspb.PerXdsConfig_EndpointConfig{ - EndpointConfig: &v3adminpb.EndpointsConfigDump{ - DynamicEndpointConfigs: resources, - }, - }, } + return ret } -func serviceStatusToProto(serviceStatus client.ServiceStatus) v3adminpb.ClientResourceStatus { +func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { switch serviceStatus { - case client.ServiceStatusUnknown: + case xdsresource.ServiceStatusUnknown: return v3adminpb.ClientResourceStatus_UNKNOWN - case client.ServiceStatusRequested: + case xdsresource.ServiceStatusRequested: return v3adminpb.ClientResourceStatus_REQUESTED - case client.ServiceStatusNotExist: + case xdsresource.ServiceStatusNotExist: 
return v3adminpb.ClientResourceStatus_DOES_NOT_EXIST - case client.ServiceStatusACKed: + case xdsresource.ServiceStatusACKed: return v3adminpb.ClientResourceStatus_ACKED - case client.ServiceStatusNACKed: + case xdsresource.ServiceStatusNACKed: return v3adminpb.ClientResourceStatus_NACKED default: return v3adminpb.ClientResourceStatus_UNKNOWN diff --git a/xds/csds/csds_e2e_test.go b/xds/csds/csds_e2e_test.go new file mode 100644 index 000000000000..481e93929fa2 --- /dev/null +++ b/xds/csds/csds_e2e_test.go @@ -0,0 +1,461 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package csds_test + +import ( + "context" + "fmt" + "io" + "sort" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/csds" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + + v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + v3statuspbgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" + + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter +) + +const defaultTestTimeout = 5 * time.Second + +var cmpOpts = cmp.Options{ + cmp.Transformer("sort", func(in []*v3statuspb.ClientConfig_GenericXdsConfig) []*v3statuspb.ClientConfig_GenericXdsConfig { + out := append([]*v3statuspb.ClientConfig_GenericXdsConfig(nil), in...) 
+ sort.Slice(out, func(i, j int) bool { + a, b := out[i], out[j] + if a == nil { + return true + } + if b == nil { + return false + } + if strings.Compare(a.TypeUrl, b.TypeUrl) == 0 { + return strings.Compare(a.Name, b.Name) < 0 + } + return strings.Compare(a.TypeUrl, b.TypeUrl) < 0 + }) + return out + }), + protocmp.Transform(), + protocmp.IgnoreFields((*v3statuspb.ClientConfig_GenericXdsConfig)(nil), "last_updated"), + protocmp.IgnoreFields((*v3adminpb.UpdateFailureState)(nil), "last_update_attempt", "details"), +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// The following watcher implementations are no-ops since we don't really care +// about the callback received by these watchers in the test. We only care +// whether CSDS reports the expected state. + +type unimplementedListenerWatcher struct{} + +func (unimplementedListenerWatcher) OnUpdate(*xdsresource.ListenerResourceData) {} +func (unimplementedListenerWatcher) OnError(error) {} +func (unimplementedListenerWatcher) OnResourceDoesNotExist() {} + +type unimplementedRouteConfigWatcher struct{} + +func (unimplementedRouteConfigWatcher) OnUpdate(*xdsresource.RouteConfigResourceData) {} +func (unimplementedRouteConfigWatcher) OnError(error) {} +func (unimplementedRouteConfigWatcher) OnResourceDoesNotExist() {} + +type unimplementedClusterWatcher struct{} + +func (unimplementedClusterWatcher) OnUpdate(*xdsresource.ClusterResourceData) {} +func (unimplementedClusterWatcher) OnError(error) {} +func (unimplementedClusterWatcher) OnResourceDoesNotExist() {} + +type unimplementedEndpointsWatcher struct{} + +func (unimplementedEndpointsWatcher) OnUpdate(*xdsresource.EndpointsResourceData) {} +func (unimplementedEndpointsWatcher) OnError(error) {} +func (unimplementedEndpointsWatcher) OnResourceDoesNotExist() {} + +func (s) TestCSDS(t *testing.T) { + // Spin up a xDS management server on a local port. 
+ nodeID := uuid.New().String() + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + + // Create a bootstrap file in a temporary directory. + bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer bootstrapCleanup() + + // Create an xDS client. This will end up using the same singleton as used + // by the CSDS service. + xdsC, close, err := xdsclient.New() + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Initialize an gRPC server and register CSDS on it. + server := grpc.NewServer() + csdss, err := csds.NewClientStatusDiscoveryServer() + if err != nil { + t.Fatal(err) + } + v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) + defer func() { + server.Stop() + csdss.Close() + }() + + // Create a local listener and pass it to Serve(). + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + // Create a client to the CSDS server. + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Failed to dial CSDS server %q: %v", lis.Addr().String(), err) + } + c := v3statuspbgrpc.NewClientStatusDiscoveryServiceClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := c.StreamClientStatus(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("Failed to create a stream for CSDS: %v", err) + } + defer conn.Close() + + // Verify that the xDS client reports an empty config. 
+ if err := checkClientStatusResponse(stream, nil); err != nil { + t.Fatal(err) + } + + // Initialize the xDS resources to be used in this test. + ldsTargets := []string{"lds.target.good:0000", "lds.target.good:1111"} + rdsTargets := []string{"route-config-0", "route-config-1"} + cdsTargets := []string{"cluster-0", "cluster-1"} + edsTargets := []string{"endpoints-0", "endpoints-1"} + listeners := make([]*v3listenerpb.Listener, len(ldsTargets)) + listenerAnys := make([]*anypb.Any, len(ldsTargets)) + for i := range ldsTargets { + listeners[i] = e2e.DefaultClientListener(ldsTargets[i], rdsTargets[i]) + listenerAnys[i] = testutils.MarshalAny(listeners[i]) + } + routes := make([]*v3routepb.RouteConfiguration, len(rdsTargets)) + routeAnys := make([]*anypb.Any, len(rdsTargets)) + for i := range rdsTargets { + routes[i] = e2e.DefaultRouteConfig(rdsTargets[i], ldsTargets[i], cdsTargets[i]) + routeAnys[i] = testutils.MarshalAny(routes[i]) + } + clusters := make([]*v3clusterpb.Cluster, len(cdsTargets)) + clusterAnys := make([]*anypb.Any, len(cdsTargets)) + for i := range cdsTargets { + clusters[i] = e2e.DefaultCluster(cdsTargets[i], edsTargets[i], e2e.SecurityLevelNone) + clusterAnys[i] = testutils.MarshalAny(clusters[i]) + } + endpoints := make([]*v3endpointpb.ClusterLoadAssignment, len(edsTargets)) + endpointAnys := make([]*anypb.Any, len(edsTargets)) + ips := []string{"0.0.0.0", "1.1.1.1"} + ports := []uint32{123, 456} + for i := range edsTargets { + endpoints[i] = e2e.DefaultEndpoint(edsTargets[i], ips[i], ports[i:i+1]) + endpointAnys[i] = testutils.MarshalAny(endpoints[i]) + } + + // Register watches on the xDS client for two resources of each type. 
+ for _, target := range ldsTargets { + xdsresource.WatchListener(xdsC, target, unimplementedListenerWatcher{}) + } + for _, target := range rdsTargets { + xdsresource.WatchRouteConfig(xdsC, target, unimplementedRouteConfigWatcher{}) + } + for _, target := range cdsTargets { + xdsresource.WatchCluster(xdsC, target, unimplementedClusterWatcher{}) + } + for _, target := range edsTargets { + xdsresource.WatchEndpoints(xdsC, target, unimplementedEndpointsWatcher{}) + } + + // Verify that the xDS client reports the resources as being in "Requested" + // state. + want := []*v3statuspb.ClientConfig_GenericXdsConfig{} + for i := range ldsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.listener.v3.Listener", ldsTargets[i], "", v3adminpb.ClientResourceStatus_REQUESTED, nil)) + } + for i := range rdsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", rdsTargets[i], "", v3adminpb.ClientResourceStatus_REQUESTED, nil)) + } + for i := range cdsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.cluster.v3.Cluster", cdsTargets[i], "", v3adminpb.ClientResourceStatus_REQUESTED, nil)) + } + for i := range edsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", edsTargets[i], "", v3adminpb.ClientResourceStatus_REQUESTED, nil)) + } + for { + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout when waiting for resources in \"Requested\" state: %v", err) + } + if err := checkClientStatusResponse(stream, want); err == nil { + break + } + time.Sleep(time.Millisecond * 100) + } + + // Configure the management server with two resources of each type, + // corresponding to the watches registered above. 
+ if err := mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: listeners, + Routes: routes, + Clusters: clusters, + Endpoints: endpoints, + }); err != nil { + t.Fatal(err) + } + + // Verify that the xDS client reports the resources as being in "ACKed" + // state, and in version "1". + want = nil + for i := range ldsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.listener.v3.Listener", ldsTargets[i], "1", v3adminpb.ClientResourceStatus_ACKED, listenerAnys[i])) + } + for i := range rdsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", rdsTargets[i], "1", v3adminpb.ClientResourceStatus_ACKED, routeAnys[i])) + } + for i := range cdsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.cluster.v3.Cluster", cdsTargets[i], "1", v3adminpb.ClientResourceStatus_ACKED, clusterAnys[i])) + } + for i := range edsTargets { + want = append(want, makeGenericXdsConfig("type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", edsTargets[i], "1", v3adminpb.ClientResourceStatus_ACKED, endpointAnys[i])) + } + for { + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout when waiting for resources in \"ACKed\" state: %v", err) + } + err := checkClientStatusResponse(stream, want) + if err == nil { + break + } + time.Sleep(time.Millisecond * 100) + } + + // Update the first resource of each type in the management server to a + // value which is expected to be NACK'ed by the xDS client. 
+ const nackResourceIdx = 0 + listeners[nackResourceIdx].ApiListener = &v3listenerpb.ApiListener{} + routes[nackResourceIdx].VirtualHosts = []*v3routepb.VirtualHost{{Routes: []*v3routepb.Route{{}}}} + clusters[nackResourceIdx].ClusterDiscoveryType = &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC} + endpoints[nackResourceIdx].Endpoints = []*v3endpointpb.LocalityLbEndpoints{{}} + if err := mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: listeners, + Routes: routes, + Clusters: clusters, + Endpoints: endpoints, + SkipValidation: true, + }); err != nil { + t.Fatal(err) + } + + // Verify that the xDS client reports the first resource of each type as + // being in "NACKed" state, and the second resource of each type to be in + // "ACKed" state. The version for the ACKed resource would be "2", while + // that for the NACKed resource would be "1". In the NACKed resource, the + // version which is NACKed is stored in the ErrorState field. + want = nil + for i := range ldsTargets { + config := makeGenericXdsConfig("type.googleapis.com/envoy.config.listener.v3.Listener", ldsTargets[i], "2", v3adminpb.ClientResourceStatus_ACKED, listenerAnys[i]) + if i == nackResourceIdx { + config.VersionInfo = "1" + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{VersionInfo: "2"} + } + want = append(want, config) + } + for i := range rdsTargets { + config := makeGenericXdsConfig("type.googleapis.com/envoy.config.route.v3.RouteConfiguration", rdsTargets[i], "2", v3adminpb.ClientResourceStatus_ACKED, routeAnys[i]) + if i == nackResourceIdx { + config.VersionInfo = "1" + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{VersionInfo: "2"} + } + want = append(want, config) + } + for i := range cdsTargets { + config := makeGenericXdsConfig("type.googleapis.com/envoy.config.cluster.v3.Cluster", cdsTargets[i], "2", 
v3adminpb.ClientResourceStatus_ACKED, clusterAnys[i]) + if i == nackResourceIdx { + config.VersionInfo = "1" + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{VersionInfo: "2"} + } + want = append(want, config) + } + for i := range edsTargets { + config := makeGenericXdsConfig("type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", edsTargets[i], "2", v3adminpb.ClientResourceStatus_ACKED, endpointAnys[i]) + if i == nackResourceIdx { + config.VersionInfo = "1" + config.ClientStatus = v3adminpb.ClientResourceStatus_NACKED + config.ErrorState = &v3adminpb.UpdateFailureState{VersionInfo: "2"} + } + want = append(want, config) + } + for { + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout when waiting for resources in \"NACKed\" state: %v", err) + } + err := checkClientStatusResponse(stream, want) + if err == nil { + break + } + time.Sleep(time.Millisecond * 100) + } +} + +func makeGenericXdsConfig(typeURL, name, version string, status v3adminpb.ClientResourceStatus, config *anypb.Any) *v3statuspb.ClientConfig_GenericXdsConfig { + return &v3statuspb.ClientConfig_GenericXdsConfig{ + TypeUrl: typeURL, + Name: name, + VersionInfo: version, + ClientStatus: status, + XdsConfig: config, + } +} + +func checkClientStatusResponse(stream v3statuspbgrpc.ClientStatusDiscoveryService_StreamClientStatusClient, want []*v3statuspb.ClientConfig_GenericXdsConfig) error { + if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { + if err != io.EOF { + return fmt.Errorf("failed to send ClientStatusRequest: %v", err) + } + // If the stream has closed, we call Recv() until it returns a non-nil + // error to get the actual error on the stream. 
+ for { + if _, err := stream.Recv(); err != nil { + return fmt.Errorf("failed to recv ClientStatusResponse: %v", err) + } + } + } + resp, err := stream.Recv() + if err != nil { + return fmt.Errorf("failed to recv ClientStatusResponse: %v", err) + } + + if n := len(resp.Config); n != 1 { + return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(resp)) + } + + if diff := cmp.Diff(resp.Config[0].GenericXdsConfigs, want, cmpOpts); diff != "" { + return fmt.Errorf(diff) + } + return nil +} + +func (s) TestCSDSNoXDSClient(t *testing.T) { + // Create a bootstrap file in a temporary directory. Since we pass empty + // options, it would end up creating a bootstrap file with an empty + // serverURI which will fail xDS client creation. + bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{}) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { bootstrapCleanup() }) + + // Initialize an gRPC server and register CSDS on it. + server := grpc.NewServer() + csdss, err := csds.NewClientStatusDiscoveryServer() + if err != nil { + t.Fatal(err) + } + defer csdss.Close() + v3statuspbgrpc.RegisterClientStatusDiscoveryServiceServer(server, csdss) + + // Create a local listener and pass it to Serve(). + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + defer server.Stop() + + // Create a client to the CSDS server. 
+ conn, err := grpc.Dial(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("Failed to dial CSDS server %q: %v", lis.Addr().String(), err) + } + defer conn.Close() + c := v3statuspbgrpc.NewClientStatusDiscoveryServiceClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + stream, err := c.StreamClientStatus(ctx, grpc.WaitForReady(true)) + if err != nil { + t.Fatalf("Failed to create a stream for CSDS: %v", err) + } + + if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { + t.Fatalf("Failed to send ClientStatusRequest: %v", err) + } + r, err := stream.Recv() + if err != nil { + // io.EOF is not ok. + t.Fatalf("Failed to recv ClientStatusResponse: %v", err) + } + if n := len(r.Config); n != 0 { + t.Fatalf("got %d configs, want 0: %v", n, proto.MarshalTextString(r)) + } +} diff --git a/xds/csds/csds_test.go b/xds/csds/csds_test.go deleted file mode 100644 index 2993beea0e5d..000000000000 --- a/xds/csds/csds_test.go +++ /dev/null @@ -1,685 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package csds - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/internal/xds" - "google.golang.org/grpc/xds/internal/client" - _ "google.golang.org/grpc/xds/internal/httpfilter/router" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/e2e" - "google.golang.org/protobuf/testing/protocmp" - "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/protobuf/types/known/timestamppb" - - v3adminpb "github.com/envoyproxy/go-control-plane/envoy/admin/v3" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" -) - -const ( - defaultTestTimeout = 10 * time.Second -) - -type xdsClientInterfaceWithWatch interface { - WatchListener(string, func(client.ListenerUpdate, error)) func() - WatchRouteConfig(string, func(client.RouteConfigUpdate, error)) func() - WatchCluster(string, func(client.ClusterUpdate, error)) func() - WatchEndpoints(string, func(client.EndpointsUpdate, error)) func() -} - -var cmpOpts = cmp.Options{ - cmpopts.EquateEmpty(), - cmp.Comparer(func(a, 
b *timestamppb.Timestamp) bool { return true }), - protocmp.IgnoreFields(&v3adminpb.UpdateFailureState{}, "last_update_attempt", "details"), - protocmp.SortRepeated(func(a, b *v3adminpb.ListenersConfigDump_DynamicListener) bool { - return strings.Compare(a.Name, b.Name) < 0 - }), - protocmp.SortRepeated(func(a, b *v3adminpb.RoutesConfigDump_DynamicRouteConfig) bool { - if a.RouteConfig == nil { - return false - } - if b.RouteConfig == nil { - return true - } - var at, bt v3routepb.RouteConfiguration - if err := ptypes.UnmarshalAny(a.RouteConfig, &at); err != nil { - panic("failed to unmarshal RouteConfig" + err.Error()) - } - if err := ptypes.UnmarshalAny(b.RouteConfig, &bt); err != nil { - panic("failed to unmarshal RouteConfig" + err.Error()) - } - return strings.Compare(at.Name, bt.Name) < 0 - }), - protocmp.SortRepeated(func(a, b *v3adminpb.ClustersConfigDump_DynamicCluster) bool { - if a.Cluster == nil { - return false - } - if b.Cluster == nil { - return true - } - var at, bt v3clusterpb.Cluster - if err := ptypes.UnmarshalAny(a.Cluster, &at); err != nil { - panic("failed to unmarshal Cluster" + err.Error()) - } - if err := ptypes.UnmarshalAny(b.Cluster, &bt); err != nil { - panic("failed to unmarshal Cluster" + err.Error()) - } - return strings.Compare(at.Name, bt.Name) < 0 - }), - protocmp.SortRepeated(func(a, b *v3adminpb.EndpointsConfigDump_DynamicEndpointConfig) bool { - if a.EndpointConfig == nil { - return false - } - if b.EndpointConfig == nil { - return true - } - var at, bt v3endpointpb.ClusterLoadAssignment - if err := ptypes.UnmarshalAny(a.EndpointConfig, &at); err != nil { - panic("failed to unmarshal Endpoints" + err.Error()) - } - if err := ptypes.UnmarshalAny(b.EndpointConfig, &bt); err != nil { - panic("failed to unmarshal Endpoints" + err.Error()) - } - return strings.Compare(at.ClusterName, bt.ClusterName) < 0 - }), - protocmp.IgnoreFields(&v3adminpb.ListenersConfigDump_DynamicListenerState{}, "last_updated"), - 
protocmp.IgnoreFields(&v3adminpb.RoutesConfigDump_DynamicRouteConfig{}, "last_updated"), - protocmp.IgnoreFields(&v3adminpb.ClustersConfigDump_DynamicCluster{}, "last_updated"), - protocmp.IgnoreFields(&v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{}, "last_updated"), - protocmp.Transform(), -} - -var ( - ldsTargets = []string{"lds.target.good:0000", "lds.target.good:1111"} - listeners = make([]*v3listenerpb.Listener, len(ldsTargets)) - listenerAnys = make([]*anypb.Any, len(ldsTargets)) - - rdsTargets = []string{"route-config-0", "route-config-1"} - routes = make([]*v3routepb.RouteConfiguration, len(rdsTargets)) - routeAnys = make([]*anypb.Any, len(rdsTargets)) - - cdsTargets = []string{"cluster-0", "cluster-1"} - clusters = make([]*v3clusterpb.Cluster, len(cdsTargets)) - clusterAnys = make([]*anypb.Any, len(cdsTargets)) - - edsTargets = []string{"endpoints-0", "endpoints-1"} - endpoints = make([]*v3endpointpb.ClusterLoadAssignment, len(edsTargets)) - endpointAnys = make([]*anypb.Any, len(edsTargets)) - ips = []string{"0.0.0.0", "1.1.1.1"} - ports = []uint32{123, 456} -) - -func init() { - for i := range ldsTargets { - listeners[i] = e2e.DefaultListener(ldsTargets[i], rdsTargets[i]) - listenerAnys[i], _ = ptypes.MarshalAny(listeners[i]) - } - for i := range rdsTargets { - routes[i] = e2e.DefaultRouteConfig(rdsTargets[i], ldsTargets[i], cdsTargets[i]) - routeAnys[i], _ = ptypes.MarshalAny(routes[i]) - } - for i := range cdsTargets { - clusters[i] = e2e.DefaultCluster(cdsTargets[i], edsTargets[i]) - clusterAnys[i], _ = ptypes.MarshalAny(clusters[i]) - } - for i := range edsTargets { - endpoints[i] = e2e.DefaultEndpoint(edsTargets[i], ips[i], ports[i]) - endpointAnys[i], _ = ptypes.MarshalAny(endpoints[i]) - } -} - -func TestCSDS(t *testing.T) { - const retryCount = 10 - - xdsC, mgmServer, nodeID, stream, cleanup := commonSetup(t) - defer cleanup() - - for _, target := range ldsTargets { - xdsC.WatchListener(target, func(client.ListenerUpdate, error) {}) - } - 
for _, target := range rdsTargets { - xdsC.WatchRouteConfig(target, func(client.RouteConfigUpdate, error) {}) - } - for _, target := range cdsTargets { - xdsC.WatchCluster(target, func(client.ClusterUpdate, error) {}) - } - for _, target := range edsTargets { - xdsC.WatchEndpoints(target, func(client.EndpointsUpdate, error) {}) - } - - for i := 0; i < retryCount; i++ { - err := checkForRequested(stream) - if err == nil { - break - } - if i == retryCount-1 { - t.Fatalf("%v", err) - } - time.Sleep(time.Millisecond * 100) - } - - if err := mgmServer.Update(e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: listeners, - Routes: routes, - Clusters: clusters, - Endpoints: endpoints, - }); err != nil { - t.Fatal(err) - } - for i := 0; i < retryCount; i++ { - err := checkForACKed(stream) - if err == nil { - break - } - if i == retryCount-1 { - t.Fatalf("%v", err) - } - time.Sleep(time.Millisecond * 100) - } - - const nackResourceIdx = 0 - if err := mgmServer.Update(e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{ - {Name: ldsTargets[nackResourceIdx], ApiListener: &v3listenerpb.ApiListener{}}, // 0 will be nacked. 1 will stay the same. 
- }, - Routes: []*v3routepb.RouteConfiguration{ - {Name: rdsTargets[nackResourceIdx], VirtualHosts: []*v3routepb.VirtualHost{{ - Routes: []*v3routepb.Route{{}}, - }}}, - }, - Clusters: []*v3clusterpb.Cluster{ - {Name: cdsTargets[nackResourceIdx], ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}}, - }, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{ - {ClusterName: edsTargets[nackResourceIdx], Endpoints: []*v3endpointpb.LocalityLbEndpoints{{}}}, - }, - SkipValidation: true, - }); err != nil { - t.Fatal(err) - } - for i := 0; i < retryCount; i++ { - err := checkForNACKed(nackResourceIdx, stream) - if err == nil { - break - } - if i == retryCount-1 { - t.Fatalf("%v", err) - } - time.Sleep(time.Millisecond * 100) - } -} - -func commonSetup(t *testing.T) (xdsClientInterfaceWithWatch, *e2e.ManagementServer, string, v3statuspb.ClientStatusDiscoveryService_StreamClientStatusClient, func()) { - t.Helper() - - // Spin up a xDS management server on a local port. - nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer() - if err != nil { - t.Fatal(err) - } - - // Create a bootstrap file in a temporary directory. - bootstrapCleanup, err := xds.SetupBootstrapFile(xds.BootstrapOptions{ - Version: xds.TransportV3, - NodeID: nodeID, - ServerURI: fs.Address, - }) - if err != nil { - t.Fatal(err) - } - // Create xds_client. - xdsC, err := client.New() - if err != nil { - t.Fatalf("failed to create xds client: %v", err) - } - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { - return xdsC, nil - } - - // Initialize an gRPC server and register CSDS on it. - server := grpc.NewServer() - csdss, err := NewClientStatusDiscoveryServer() - if err != nil { - t.Fatal(err) - } - v3statuspb.RegisterClientStatusDiscoveryServiceServer(server, csdss) - // Create a local listener and pass it to Serve(). 
- lis, err := testutils.LocalTCPListener() - if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) - } - go func() { - if err := server.Serve(lis); err != nil { - t.Errorf("Serve() failed: %v", err) - } - }() - - // Create CSDS client. - conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) - if err != nil { - t.Fatalf("cannot connect to server: %v", err) - } - c := v3statuspb.NewClientStatusDiscoveryServiceClient(conn) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - stream, err := c.StreamClientStatus(ctx, grpc.WaitForReady(true)) - if err != nil { - t.Fatalf("cannot get ServerReflectionInfo: %v", err) - } - - return xdsC, fs, nodeID, stream, func() { - fs.Stop() - cancel() - conn.Close() - server.Stop() - csdss.Close() - newXDSClient = oldNewXDSClient - xdsC.Close() - bootstrapCleanup() - } -} - -func checkForRequested(stream v3statuspb.ClientStatusDiscoveryService_StreamClientStatusClient) error { - if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { - return fmt.Errorf("failed to send request: %v", err) - } - r, err := stream.Recv() - if err != nil { - // io.EOF is not ok. 
- return fmt.Errorf("failed to recv response: %v", err) - } - - if n := len(r.Config); n != 1 { - return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(r)) - } - if n := len(r.Config[0].XdsConfig); n != 4 { - return fmt.Errorf("got %d xds configs (one for each type), want 4: %v", n, proto.MarshalTextString(r)) - } - for _, cfg := range r.Config[0].XdsConfig { - switch config := cfg.PerXdsConfig.(type) { - case *v3statuspb.PerXdsConfig_ListenerConfig: - var wantLis []*v3adminpb.ListenersConfigDump_DynamicListener - for i := range ldsTargets { - wantLis = append(wantLis, &v3adminpb.ListenersConfigDump_DynamicListener{ - Name: ldsTargets[i], - ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - wantDump := &v3adminpb.ListenersConfigDump{ - DynamicListeners: wantLis, - } - if diff := cmp.Diff(config.ListenerConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_RouteConfig: - var wantRoutes []*v3adminpb.RoutesConfigDump_DynamicRouteConfig - for range rdsTargets { - wantRoutes = append(wantRoutes, &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ - ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - wantDump := &v3adminpb.RoutesConfigDump{ - DynamicRouteConfigs: wantRoutes, - } - if diff := cmp.Diff(config.RouteConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_ClusterConfig: - var wantCluster []*v3adminpb.ClustersConfigDump_DynamicCluster - for range cdsTargets { - wantCluster = append(wantCluster, &v3adminpb.ClustersConfigDump_DynamicCluster{ - ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - wantDump := &v3adminpb.ClustersConfigDump{ - DynamicActiveClusters: wantCluster, - } - if diff := cmp.Diff(config.ClusterConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_EndpointConfig: - var wantEndpoint []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig - 
for range cdsTargets { - wantEndpoint = append(wantEndpoint, &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ - ClientStatus: v3adminpb.ClientResourceStatus_REQUESTED, - }) - } - wantDump := &v3adminpb.EndpointsConfigDump{ - DynamicEndpointConfigs: wantEndpoint, - } - if diff := cmp.Diff(config.EndpointConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - default: - return fmt.Errorf("unexpected PerXdsConfig: %+v; %v", cfg.PerXdsConfig, protoToJSON(r)) - } - } - return nil -} - -func checkForACKed(stream v3statuspb.ClientStatusDiscoveryService_StreamClientStatusClient) error { - const wantVersion = "1" - - if err := stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { - return fmt.Errorf("failed to send: %v", err) - } - r, err := stream.Recv() - if err != nil { - // io.EOF is not ok. - return fmt.Errorf("failed to recv response: %v", err) - } - - if n := len(r.Config); n != 1 { - return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(r)) - } - if n := len(r.Config[0].XdsConfig); n != 4 { - return fmt.Errorf("got %d xds configs (one for each type), want 4: %v", n, proto.MarshalTextString(r)) - } - for _, cfg := range r.Config[0].XdsConfig { - switch config := cfg.PerXdsConfig.(type) { - case *v3statuspb.PerXdsConfig_ListenerConfig: - var wantLis []*v3adminpb.ListenersConfigDump_DynamicListener - for i := range ldsTargets { - wantLis = append(wantLis, &v3adminpb.ListenersConfigDump_DynamicListener{ - Name: ldsTargets[i], - ActiveState: &v3adminpb.ListenersConfigDump_DynamicListenerState{ - VersionInfo: wantVersion, - Listener: listenerAnys[i], - LastUpdated: nil, - }, - ErrorState: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - wantDump := &v3adminpb.ListenersConfigDump{ - VersionInfo: wantVersion, - DynamicListeners: wantLis, - } - if diff := cmp.Diff(config.ListenerConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_RouteConfig: - 
var wantRoutes []*v3adminpb.RoutesConfigDump_DynamicRouteConfig - for i := range rdsTargets { - wantRoutes = append(wantRoutes, &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ - VersionInfo: wantVersion, - RouteConfig: routeAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - wantDump := &v3adminpb.RoutesConfigDump{ - DynamicRouteConfigs: wantRoutes, - } - if diff := cmp.Diff(config.RouteConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_ClusterConfig: - var wantCluster []*v3adminpb.ClustersConfigDump_DynamicCluster - for i := range cdsTargets { - wantCluster = append(wantCluster, &v3adminpb.ClustersConfigDump_DynamicCluster{ - VersionInfo: wantVersion, - Cluster: clusterAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - wantDump := &v3adminpb.ClustersConfigDump{ - VersionInfo: wantVersion, - DynamicActiveClusters: wantCluster, - } - if diff := cmp.Diff(config.ClusterConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_EndpointConfig: - var wantEndpoint []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig - for i := range cdsTargets { - wantEndpoint = append(wantEndpoint, &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ - VersionInfo: wantVersion, - EndpointConfig: endpointAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - }) - } - wantDump := &v3adminpb.EndpointsConfigDump{ - DynamicEndpointConfigs: wantEndpoint, - } - if diff := cmp.Diff(config.EndpointConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - default: - return fmt.Errorf("unexpected PerXdsConfig: %+v; %v", cfg.PerXdsConfig, protoToJSON(r)) - } - } - return nil -} - -func checkForNACKed(nackResourceIdx int, stream v3statuspb.ClientStatusDiscoveryService_StreamClientStatusClient) error { - const ( - ackVersion = "1" - nackVersion = "2" - ) - - if err := 
stream.Send(&v3statuspb.ClientStatusRequest{Node: nil}); err != nil { - return fmt.Errorf("failed to send: %v", err) - } - r, err := stream.Recv() - if err != nil { - // io.EOF is not ok. - return fmt.Errorf("failed to recv response: %v", err) - } - - if n := len(r.Config); n != 1 { - return fmt.Errorf("got %d configs, want 1: %v", n, proto.MarshalTextString(r)) - } - if n := len(r.Config[0].XdsConfig); n != 4 { - return fmt.Errorf("got %d xds configs (one for each type), want 4: %v", n, proto.MarshalTextString(r)) - } - for _, cfg := range r.Config[0].XdsConfig { - switch config := cfg.PerXdsConfig.(type) { - case *v3statuspb.PerXdsConfig_ListenerConfig: - var wantLis []*v3adminpb.ListenersConfigDump_DynamicListener - for i := range ldsTargets { - configDump := &v3adminpb.ListenersConfigDump_DynamicListener{ - Name: ldsTargets[i], - ActiveState: &v3adminpb.ListenersConfigDump_DynamicListenerState{ - VersionInfo: ackVersion, - Listener: listenerAnys[i], - LastUpdated: nil, - }, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - wantLis = append(wantLis, configDump) - } - wantDump := &v3adminpb.ListenersConfigDump{ - VersionInfo: nackVersion, - DynamicListeners: wantLis, - } - if diff := cmp.Diff(config.ListenerConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_RouteConfig: - var wantRoutes []*v3adminpb.RoutesConfigDump_DynamicRouteConfig - for i := range rdsTargets { - configDump := &v3adminpb.RoutesConfigDump_DynamicRouteConfig{ - VersionInfo: ackVersion, - RouteConfig: routeAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - configDump.ErrorState = 
&v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - wantRoutes = append(wantRoutes, configDump) - } - wantDump := &v3adminpb.RoutesConfigDump{ - DynamicRouteConfigs: wantRoutes, - } - if diff := cmp.Diff(config.RouteConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_ClusterConfig: - var wantCluster []*v3adminpb.ClustersConfigDump_DynamicCluster - for i := range cdsTargets { - configDump := &v3adminpb.ClustersConfigDump_DynamicCluster{ - VersionInfo: ackVersion, - Cluster: clusterAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - wantCluster = append(wantCluster, configDump) - } - wantDump := &v3adminpb.ClustersConfigDump{ - VersionInfo: nackVersion, - DynamicActiveClusters: wantCluster, - } - if diff := cmp.Diff(config.ClusterConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - case *v3statuspb.PerXdsConfig_EndpointConfig: - var wantEndpoint []*v3adminpb.EndpointsConfigDump_DynamicEndpointConfig - for i := range cdsTargets { - configDump := &v3adminpb.EndpointsConfigDump_DynamicEndpointConfig{ - VersionInfo: ackVersion, - EndpointConfig: endpointAnys[i], - LastUpdated: nil, - ClientStatus: v3adminpb.ClientResourceStatus_ACKED, - } - if i == nackResourceIdx { - configDump.ClientStatus = v3adminpb.ClientResourceStatus_NACKED - configDump.ErrorState = &v3adminpb.UpdateFailureState{ - Details: "blahblah", - VersionInfo: nackVersion, - } - } - wantEndpoint = append(wantEndpoint, configDump) - } - wantDump := &v3adminpb.EndpointsConfigDump{ - DynamicEndpointConfigs: wantEndpoint, - } - if diff := cmp.Diff(config.EndpointConfig, wantDump, cmpOpts); diff != "" { - return fmt.Errorf(diff) - } - default: - return 
fmt.Errorf("unexpected PerXdsConfig: %+v; %v", cfg.PerXdsConfig, protoToJSON(r)) - } - } - return nil -} - -func protoToJSON(p proto.Message) string { - mm := jsonpb.Marshaler{ - Indent: " ", - } - ret, _ := mm.MarshalToString(p) - return ret -} - -func Test_nodeProtoToV3(t *testing.T) { - const ( - testID = "test-id" - testCluster = "test-cluster" - testZone = "test-zone" - ) - tests := []struct { - name string - n proto.Message - want *v3corepb.Node - }{ - { - name: "v3", - n: &v3corepb.Node{ - Id: testID, - Cluster: testCluster, - Locality: &v3corepb.Locality{Zone: testZone}, - }, - want: &v3corepb.Node{ - Id: testID, - Cluster: testCluster, - Locality: &v3corepb.Locality{Zone: testZone}, - }, - }, - { - name: "v2", - n: &v2corepb.Node{ - Id: testID, - Cluster: testCluster, - Locality: &v2corepb.Locality{Zone: testZone}, - }, - want: &v3corepb.Node{ - Id: testID, - Cluster: testCluster, - Locality: &v3corepb.Locality{Zone: testZone}, - }, - }, - { - name: "not node", - n: &v2corepb.Locality{Zone: testZone}, - want: nil, // Input is not a node, should return nil. 
- }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := nodeProtoToV3(tt.n) - if diff := cmp.Diff(got, tt.want, protocmp.Transform()); diff != "" { - t.Errorf("nodeProtoToV3() got unexpected result, diff (-got, +want): %v", diff) - } - }) - } -} diff --git a/xds/googledirectpath/googlec2p.go b/xds/googledirectpath/googlec2p.go index 4ccec4ec4120..f8f749835c24 100644 --- a/xds/googledirectpath/googlec2p.go +++ b/xds/googledirectpath/googlec2p.go @@ -27,27 +27,31 @@ package googledirectpath import ( "fmt" + "net/url" "time" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "google.golang.org/grpc" - "google.golang.org/grpc/credentials/google" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/googlecloud" internalgrpclog "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" - _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/types/known/structpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + + _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. 
) const ( - c2pScheme = "google-c2p" + c2pScheme = "google-c2p" + c2pExperimentalScheme = "google-c2p-experimental" + c2pAuthority = "traffic-director-c2p.xds.googleapis.com" - tdURL = "directpath-trafficdirector.googleapis.com" + tdURL = "dns:///directpath-pa.googleapis.com" httpReqTimeout = 10 * time.Second zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" @@ -61,15 +65,11 @@ const ( dnsName, xdsName = "dns", "xds" ) -type xdsClientInterface interface { - Close() -} - // For overriding in unittests. var ( onGCE = googlecloud.OnGCE - newClientWithConfig = func(config *bootstrap.Config) (xdsClientInterface, error) { + newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, func(), error) { return xdsclient.NewWithConfig(config) } @@ -77,17 +77,27 @@ var ( ) func init() { - if env.C2PResolverSupport { - resolver.Register(c2pResolverBuilder{}) - } + resolver.Register(c2pResolverBuilder{ + scheme: c2pScheme, + }) + // TODO(apolcyn): remove this experimental scheme before the 1.52 release + resolver.Register(c2pResolverBuilder{ + scheme: c2pExperimentalScheme, + }) } -type c2pResolverBuilder struct{} +type c2pResolverBuilder struct { + scheme string +} func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if t.URL.Host != "" { + return nil, fmt.Errorf("google-c2p URI scheme does not support authorities") + } + if !runDirectPath() { // If not xDS, fallback to DNS. 
- t.Scheme = dnsName + t.URL.Scheme = dnsName return resolver.Get(dnsName).Build(t, cc, opts) } @@ -101,49 +111,71 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts go func() { zoneCh <- getZone(httpReqTimeout) }() go func() { ipv6CapableCh <- getIPv6Capable(httpReqTimeout) }() - balancerName := env.C2PResolverTestOnlyTrafficDirectorURI + balancerName := envconfig.C2PResolverTestOnlyTrafficDirectorURI if balancerName == "" { balancerName = tdURL } + serverConfig, err := bootstrap.ServerConfigFromJSON([]byte(fmt.Sprintf(` + { + "server_uri": "%s", + "channel_creds": [{"type": "google_default"}], + "server_features": ["xds_v3", "ignore_resource_deletion"] + }`, balancerName))) + if err != nil { + return nil, fmt.Errorf("failed to build bootstrap configuration: %v", err) + } config := &bootstrap.Config{ - BalancerName: balancerName, - Creds: grpc.WithCredentialsBundle(google.NewDefaultCredentials()), - TransportAPI: version.TransportV3, - NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), + XDSServer: serverConfig, + ClientDefaultListenerResourceNameTemplate: "%s", + Authorities: map[string]*bootstrap.Authority{ + c2pAuthority: { + XDSServer: serverConfig, + }, + }, + NodeProto: newNode(<-zoneCh, <-ipv6CapableCh), } // Create singleton xds client with this config. The xds client will be // used by the xds resolver later. - xdsC, err := newClientWithConfig(config) + _, close, err := newClientWithConfig(config) if err != nil { return nil, fmt.Errorf("failed to start xDS client: %v", err) } // Create and return an xDS resolver. 
- t.Scheme = xdsName + t.URL.Scheme = xdsName + if envconfig.XDSFederation { + t = resolver.Target{ + URL: url.URL{ + Scheme: xdsName, + Host: c2pAuthority, + Path: t.URL.Path, + }, + } + } xdsR, err := resolver.Get(xdsName).Build(t, cc, opts) if err != nil { - xdsC.Close() + close() return nil, err } return &c2pResolver{ - Resolver: xdsR, - client: xdsC, + Resolver: xdsR, + clientCloseFunc: close, }, nil } -func (c2pResolverBuilder) Scheme() string { - return c2pScheme +func (b c2pResolverBuilder) Scheme() string { + return b.scheme } type c2pResolver struct { resolver.Resolver - client xdsClientInterface + clientCloseFunc func() } func (r *c2pResolver) Close() { r.Resolver.Close() - r.client.Close() + r.clientCloseFunc() } var ipv6EnabledMetadata = &structpb.Struct{ @@ -152,13 +184,15 @@ var ipv6EnabledMetadata = &structpb.Struct{ }, } +var id = fmt.Sprintf("C2P-%d", grpcrand.Int()) + // newNode makes a copy of defaultNode, and populate it's Metadata and // Locality fields. func newNode(zone string, ipv6Capable bool) *v3corepb.Node { ret := &v3corepb.Node{ // Not all required fields are set in defaultNote. Metadata will be set // if ipv6 is enabled. Locality will be set to the value from metadata. - Id: "C2P", + Id: id, UserAgentName: gRPCUserAgentName, UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, ClientFeatures: []string{clientFeatureNoOverprovisioning}, @@ -173,7 +207,10 @@ func newNode(zone string, ipv6Capable bool) *v3corepb.Node { // runDirectPath returns whether this resolver should use direct path. // // direct path is enabled if this client is running on GCE, and the normal xDS -// is not used (bootstrap env vars are not set). +// is not used (bootstrap env vars are not set) or federation is enabled. 
func runDirectPath() bool { - return env.BootstrapFileName == "" && env.BootstrapFileContent == "" && onGCE() + if !onGCE() { + return false + } + return envconfig.XDSFederation || envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" } diff --git a/xds/googledirectpath/googlec2p_test.go b/xds/googledirectpath/googlec2p_test.go index 2dd31d754f3f..44e1a68e2387 100644 --- a/xds/googledirectpath/googlec2p_test.go +++ b/xds/googledirectpath/googlec2p_test.go @@ -19,20 +19,24 @@ package googledirectpath import ( + "fmt" "strconv" + "strings" "testing" "time" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc" - "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/client/bootstrap" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/testing/protocmp" "google.golang.org/protobuf/types/known/structpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) type emptyResolver struct { @@ -89,7 +93,7 @@ func TestBuildWithBootstrapEnvSet(t *testing.T) { defer replaceResolvers()() builder := resolver.Get(c2pScheme) - for i, envP := range []*string{&env.BootstrapFileName, &env.BootstrapFileContent} { + for i, envP := range []*string{&envconfig.XDSBootstrapFileName, &envconfig.XDSBootstrapFileContent} { t.Run(strconv.Itoa(i), func(t *testing.T) { // Set bootstrap config env var. 
oldEnv := *envP @@ -128,6 +132,7 @@ func TestBuildNotOnGCE(t *testing.T) { } type testXDSClient struct { + xdsclient.XDSClient closed chan struct{} } @@ -164,10 +169,10 @@ func TestBuildXDS(t *testing.T) { defer func() { getIPv6Capable = oldGetIPv6Capability }() if tt.tdURI != "" { - oldURI := env.C2PResolverTestOnlyTrafficDirectorURI - env.C2PResolverTestOnlyTrafficDirectorURI = tt.tdURI + oldURI := envconfig.C2PResolverTestOnlyTrafficDirectorURI + envconfig.C2PResolverTestOnlyTrafficDirectorURI = tt.tdURI defer func() { - env.C2PResolverTestOnlyTrafficDirectorURI = oldURI + envconfig.C2PResolverTestOnlyTrafficDirectorURI = oldURI }() } @@ -175,9 +180,9 @@ func TestBuildXDS(t *testing.T) { configCh := make(chan *bootstrap.Config, 1) oldNewClient := newClientWithConfig - newClientWithConfig = func(config *bootstrap.Config) (xdsClientInterface, error) { + newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, func(), error) { configCh <- config - return tXDSClient, nil + return tXDSClient, func() { tXDSClient.Close() }, nil } defer func() { newClientWithConfig = oldNewClient }() @@ -192,7 +197,7 @@ func TestBuildXDS(t *testing.T) { } wantNode := &v3corepb.Node{ - Id: "C2P", + Id: id, Metadata: nil, Locality: &v3corepb.Locality{Zone: testZone}, UserAgentName: gRPCUserAgentName, @@ -208,22 +213,36 @@ func TestBuildXDS(t *testing.T) { }, } } + wantServerConfig, err := bootstrap.ServerConfigFromJSON([]byte(fmt.Sprintf(`{ + "server_uri": "%s", + "channel_creds": [{"type": "google_default"}], + "server_features": ["xds_v3", "ignore_resource_deletion"] + }`, tdURL))) + if err != nil { + t.Fatalf("Failed to build server bootstrap config: %v", err) + } wantConfig := &bootstrap.Config{ - BalancerName: tdURL, - TransportAPI: version.TransportV3, - NodeProto: wantNode, + XDSServer: wantServerConfig, + ClientDefaultListenerResourceNameTemplate: "%s", + Authorities: map[string]*bootstrap.Authority{ + "traffic-director-c2p.xds.googleapis.com": { + XDSServer: 
wantServerConfig, + }, + }, + NodeProto: wantNode, } if tt.tdURI != "" { - wantConfig.BalancerName = tt.tdURI + wantConfig.XDSServer.ServerURI = tt.tdURI } cmpOpts := cmp.Options{ - cmpopts.IgnoreFields(bootstrap.Config{}, "Creds"), + cmpopts.IgnoreFields(bootstrap.ServerConfig{}, "Creds"), + cmp.AllowUnexported(bootstrap.ServerConfig{}), protocmp.Transform(), } select { - case c := <-configCh: - if diff := cmp.Diff(c, wantConfig, cmpOpts); diff != "" { - t.Fatalf("%v", diff) + case gotConfig := <-configCh: + if diff := cmp.Diff(wantConfig, gotConfig, cmpOpts); diff != "" { + t.Fatalf("Unexpected diff in bootstrap config (-want +got):\n%s", diff) } case <-time.After(time.Second): t.Fatalf("timeout waiting for client config") @@ -238,3 +257,20 @@ func TestBuildXDS(t *testing.T) { }) } } + +// TestDialFailsWhenTargetContainsAuthority attempts to Dial a target URI of +// google-c2p scheme with a non-empty authority and verifies that it fails with +// an expected error. +func TestBuildFailsWhenCalledWithAuthority(t *testing.T) { + uri := "google-c2p://an-authority/resource" + cc, err := grpc.Dial(uri, grpc.WithTransportCredentials(insecure.NewCredentials())) + defer func() { + if cc != nil { + cc.Close() + } + }() + wantErr := "google-c2p URI scheme does not support authorities" + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("grpc.Dial(%s) returned error: %v, want: %v", uri, err, wantErr) + } +} diff --git a/xds/googledirectpath/utils.go b/xds/googledirectpath/utils.go index 553b87adf47a..de33cf48d0e5 100644 --- a/xds/googledirectpath/utils.go +++ b/xds/googledirectpath/utils.go @@ -21,7 +21,7 @@ package googledirectpath import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "sync" @@ -41,15 +41,15 @@ func getFromMetadata(timeout time.Duration, urlStr string) ([]byte, error) { } resp, err := client.Do(req) if err != nil { - return nil, fmt.Errorf("failed communicating with metadata server: %w", err) + return nil, fmt.Errorf("failed 
communicating with metadata server: %v", err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("metadata server returned resp with non-OK: %v", resp) } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { - return nil, fmt.Errorf("failed reading from metadata server: %w", err) + return nil, fmt.Errorf("failed reading from metadata server: %v", err) } return body, nil } diff --git a/xds/internal/balancer/balancer.go b/xds/internal/balancer/balancer.go index 5883027a2c52..68ed789f2a4d 100644 --- a/xds/internal/balancer/balancer.go +++ b/xds/internal/balancer/balancer.go @@ -20,8 +20,11 @@ package balancer import ( - _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer - _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer - _ "google.golang.org/grpc/xds/internal/balancer/edsbalancer" // Register the EDS balancer - _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer + _ "google.golang.org/grpc/balancer/weightedtarget" // Register the weighted_target balancer + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer + _ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer + _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer + _ "google.golang.org/grpc/xds/internal/balancer/outlierdetection" // Register the outlier_detection balancer + _ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer ) diff --git a/xds/internal/balancer/balancergroup/balancergroup_test.go b/xds/internal/balancer/balancergroup/balancergroup_test.go deleted file mode 100644 index 
0ad4bf8df10f..000000000000 --- a/xds/internal/balancer/balancergroup/balancergroup_test.go +++ /dev/null @@ -1,979 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// All tests in this file are combination of balancer group and -// weighted_balancerstate_aggregator, aka weighted_target tests. The difference -// is weighted_target tests cannot add sub-balancers to balancer group directly, -// they instead uses balancer config to control sub-balancers. Even though not -// very suited, the tests still cover all the functionality. -// -// TODO: the tests should be moved to weighted_target, and balancer group's -// tests should use a mock balancerstate_aggregator. 
- -package balancergroup - -import ( - "fmt" - "testing" - "time" - - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - - "google.golang.org/grpc" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/balancer/stub" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" - "google.golang.org/grpc/xds/internal/client/load" - "google.golang.org/grpc/xds/internal/testutils" -) - -var ( - rrBuilder = balancer.Get(roundrobin.Name) - pfBuilder = balancer.Get(grpc.PickFirstBalancerName) - testBalancerIDs = []string{"b1", "b2", "b3"} - testBackendAddrs []resolver.Address -) - -const testBackendAddrsCount = 12 - -func init() { - for i := 0; i < testBackendAddrsCount; i++ { - testBackendAddrs = append(testBackendAddrs, resolver.Address{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)}) - } - - // Disable caching for all tests. It will be re-enabled in caching specific - // tests. - DefaultSubBalancerCloseTimeout = time.Millisecond -} - -func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } -} - -func newTestBalancerGroup(t *testing.T, loadStore load.PerClusterReporter) (*testutils.TestClientConn, *weightedaggregator.Aggregator, *BalancerGroup) { - cc := testutils.NewTestClientConn(t) - gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) - gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, loadStore, nil) - bg.Start() - return cc, gator, bg -} - -// 1 balancer, 1 backend -> 2 backends -> 1 backend. 
-func (s) TestBalancerGroup_OneRR_AddRemoveBackend(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) - - // Add one balancer to group. - gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], rrBuilder) - // Send one resolved address. - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - - // Send subconn state change. - sc1 := <-cc.NewSubConnCh - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with one backend. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - } - - // Send two addresses. - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - // Expect one new subconn, send state update. - sc2 := <-cc.NewSubConnCh - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin pick. - p2 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Remove the first address. 
- bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:2]}}) - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - bg.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - - // Test pick with only the second subconn. - p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSC, _ := p3.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSC.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSC, sc2) - } - } -} - -// 2 balancers, each with 1 backend. -func (s) TestBalancerGroup_TwoRR_OneBackend(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) - - // Add two balancers to group and send one resolved address to both - // balancers. - gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc1 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc2 := <-cc.NewSubConnCh - - // Send state changes for both subconns. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin on the last picker. 
- p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// 2 balancers, each with more than 1 backends. -func (s) TestBalancerGroup_TwoRR_MoreBackends(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) - - // Add two balancers to group and send one resolved address to both - // balancers. - gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - sc1 := <-cc.NewSubConnCh - sc2 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - sc3 := <-cc.NewSubConnCh - sc4 := <-cc.NewSubConnCh - - // Send state changes for both subconns. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin on the last picker. 
- p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Turn sc2's connection down, should be RR between balancers. - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - p2 := <-cc.NewPickerCh - // Expect two sc1's in the result, because balancer1 will be picked twice, - // but there's only one sc in it. - want = []balancer.SubConn{sc1, sc1, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Remove sc3's addresses. - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[3:4]}}) - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc3, scToRemove) - } - bg.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - p3 := <-cc.NewPickerCh - want = []balancer.SubConn{sc1, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Turn sc1's connection down. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - p4 := <-cc.NewPickerCh - want = []balancer.SubConn{sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p4)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Turn last connection to connecting. 
- bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - p5 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p5.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } - } - - // Turn all connections down. - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - p6 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p6.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { - t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) - } - } -} - -// 2 balancers with different weights. -func (s) TestBalancerGroup_TwoRR_DifferentWeight_MoreBackends(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) - - // Add two balancers to group and send two resolved addresses to both - // balancers. - gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - sc1 := <-cc.NewSubConnCh - sc2 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - sc3 := <-cc.NewSubConnCh - sc4 := <-cc.NewSubConnCh - - // Send state changes for both subconns. 
- bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// totally 3 balancers, add/remove balancer. -func (s) TestBalancerGroup_ThreeRR_RemoveBalancer(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) - - // Add three balancers to group and send one resolved address to both - // balancers. 
- gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc1 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:2]}}) - sc2 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[2], 1) - bg.Add(testBalancerIDs[2], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:2]}}) - sc3 := <-cc.NewSubConnCh - - // Send state changes for both subconns. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Remove the second balancer, while the others two are ready. 
- gator.Remove(testBalancerIDs[1]) - bg.Remove(testBalancerIDs[1]) - gator.BuildAndUpdate() - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scToRemove) - } - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc1, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // move balancer 3 into transient failure. - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - // Remove the first balancer, while the third is transient failure. - gator.Remove(testBalancerIDs[0]) - bg.Remove(testBalancerIDs[0]) - gator.BuildAndUpdate() - scToRemove = <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p3.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { - t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) - } - } -} - -// 2 balancers, change balancer weight. -func (s) TestBalancerGroup_TwoRR_ChangeWeight_MoreBackends(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) - - // Add two balancers to group and send two resolved addresses to both - // balancers. 
- gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - sc1 := <-cc.NewSubConnCh - sc2 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - sc3 := <-cc.NewSubConnCh - sc4 := <-cc.NewSubConnCh - - // Send state changes for both subconns. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc1, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - gator.UpdateWeight(testBalancerIDs[0], 3) - gator.BuildAndUpdate() - - // Test roundrobin with new weight. 
- p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc1, sc1, sc1, sc2, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -func (s) TestBalancerGroup_LoadReport(t *testing.T) { - loadStore := load.NewStore() - const ( - testCluster = "test-cluster" - testEDSService = "test-eds-service" - ) - cc, gator, bg := newTestBalancerGroup(t, loadStore.PerCluster(testCluster, testEDSService)) - - backendToBalancerID := make(map[balancer.SubConn]string) - - // Add two balancers to group and send two resolved addresses to both - // balancers. - gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - sc1 := <-cc.NewSubConnCh - sc2 := <-cc.NewSubConnCh - backendToBalancerID[sc1] = testBalancerIDs[0] - backendToBalancerID[sc2] = testBalancerIDs[0] - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - sc3 := <-cc.NewSubConnCh - sc4 := <-cc.NewSubConnCh - backendToBalancerID[sc3] = testBalancerIDs[1] - backendToBalancerID[sc4] = testBalancerIDs[1] - - // Send state changes for both subconns. 
- bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc4, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - // bg1 has a weight of 2, while bg2 has a weight of 1. So, we expect 20 of - // these picks to go to bg1 and 10 of them to bg2. And since there are two - // subConns in each group, we expect the picks to be equally split between - // the subConns. We do not call Done() on picks routed to sc1, so we expect - // these to show up as pending rpcs. 
- wantStoreData := []*load.Data{{ - Cluster: testCluster, - Service: testEDSService, - LocalityStats: map[string]load.LocalityData{ - testBalancerIDs[0]: { - RequestStats: load.RequestData{Succeeded: 10, InProgress: 10}, - LoadStats: map[string]load.ServerLoadData{ - "cpu_utilization": {Count: 10, Sum: 100}, - "mem_utilization": {Count: 10, Sum: 50}, - "pic": {Count: 10, Sum: 31.4}, - "piu": {Count: 10, Sum: 31.4}, - }, - }, - testBalancerIDs[1]: { - RequestStats: load.RequestData{Succeeded: 10}, - LoadStats: map[string]load.ServerLoadData{ - "cpu_utilization": {Count: 10, Sum: 100}, - "mem_utilization": {Count: 10, Sum: 50}, - "pic": {Count: 10, Sum: 31.4}, - "piu": {Count: 10, Sum: 31.4}, - }, - }, - }, - }} - for i := 0; i < 30; i++ { - scst, _ := p1.Pick(balancer.PickInfo{}) - if scst.Done != nil && scst.SubConn != sc1 { - scst.Done(balancer.DoneInfo{ - ServerLoad: &orcapb.OrcaLoadReport{ - CpuUtilization: 10, - MemUtilization: 5, - RequestCost: map[string]float64{"pic": 3.14}, - Utilization: map[string]float64{"piu": 3.14}, - }, - }) - } - } - - gotStoreData := loadStore.Stats([]string{testCluster}) - if diff := cmp.Diff(wantStoreData, gotStoreData, cmpopts.EquateEmpty(), cmpopts.EquateApprox(0, 0.1), cmpopts.IgnoreFields(load.Data{}, "ReportInterval")); diff != "" { - t.Errorf("store.stats() returned unexpected diff (-want +got):\n%s", diff) - } -} - -// Create a new balancer group, add balancer and backends, but not start. -// - b1, weight 2, backends [0,1] -// - b2, weight 1, backends [2,3] -// Start the balancer group and check behavior. -// -// Close the balancer group, call add/remove/change weight/change address. -// - b2, weight 3, backends [0,3] -// - b3, weight 1, backends [1,2] -// Start the balancer group again and check for behavior. 
-func (s) TestBalancerGroup_start_close(t *testing.T) { - cc := testutils.NewTestClientConn(t) - gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) - gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, nil, nil) - - // Add two balancers to group and send two resolved addresses to both - // balancers. - gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - - bg.Start() - - m1 := make(map[resolver.Address]balancer.SubConn) - for i := 0; i < 4; i++ { - addrs := <-cc.NewSubConnAddrsCh - sc := <-cc.NewSubConnCh - m1[addrs[0]] = sc - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - } - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{ - m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], - m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], - m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - gator.Stop() - bg.Close() - for i := 0; i < 4; i++ { - bg.UpdateSubConnState(<-cc.RemoveSubConnCh, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - } - - // Add b3, weight 1, backends [1,2]. - gator.Add(testBalancerIDs[2], 1) - bg.Add(testBalancerIDs[2], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[2], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[1:3]}}) - - // Remove b1. 
- gator.Remove(testBalancerIDs[0]) - bg.Remove(testBalancerIDs[0]) - - // Update b2 to weight 3, backends [0,3]. - gator.UpdateWeight(testBalancerIDs[1], 3) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: append([]resolver.Address(nil), testBackendAddrs[0], testBackendAddrs[3])}}) - - gator.Start() - bg.Start() - - m2 := make(map[resolver.Address]balancer.SubConn) - for i := 0; i < 4; i++ { - addrs := <-cc.NewSubConnAddrsCh - sc := <-cc.NewSubConnCh - m2[addrs[0]] = sc - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - } - - // Test roundrobin on the last picker. - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{ - m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], m2[testBackendAddrs[0]], - m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], m2[testBackendAddrs[3]], - m2[testBackendAddrs[1]], m2[testBackendAddrs[2]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// Test that balancer group start() doesn't deadlock if the balancer calls back -// into balancer group inline when it gets an update. -// -// The potential deadlock can happen if we -// - hold a lock and send updates to balancer (e.g. update resolved addresses) -// - the balancer calls back (NewSubConn or update picker) in line -// The callback will try to hold hte same lock again, which will cause a -// deadlock. -// -// This test starts the balancer group with a test balancer, will updates picker -// whenever it gets an address update. It's expected that start() doesn't block -// because of deadlock. 
-func (s) TestBalancerGroup_start_close_deadlock(t *testing.T) { - const balancerName = "stub-TestBalancerGroup_start_close_deadlock" - stub.Register(balancerName, stub.BalancerFuncs{}) - builder := balancer.Get(balancerName) - - cc := testutils.NewTestClientConn(t) - gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) - gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, nil, nil) - - gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], builder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], builder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - - bg.Start() -} - -// Test that at init time, with two sub-balancers, if one sub-balancer reports -// transient_failure, the picks won't fail with transient_failure, and should -// instead wait for the other sub-balancer. -func (s) TestBalancerGroup_InitOneSubBalancerTransientFailure(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) - - // Add two balancers to group and send one resolved address to both - // balancers. - gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc1 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - <-cc.NewSubConnCh - - // Set one subconn to TransientFailure, this will trigger one sub-balancer - // to report transient failure. 
- bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - r, err := p1.Pick(balancer.PickInfo{}) - if err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrNoSubConnAvailable, r, err) - } - } -} - -// Test that with two sub-balancers, both in transient_failure, if one turns -// connecting, the overall state stays in transient_failure, and all picks -// return transient failure error. -func (s) TestBalancerGroup_SubBalancerTurnsConnectingFromTransientFailure(t *testing.T) { - cc, gator, bg := newTestBalancerGroup(t, nil) - - // Add two balancers to group and send one resolved address to both - // balancers. - gator.Add(testBalancerIDs[0], 1) - bg.Add(testBalancerIDs[0], pfBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc1 := <-cc.NewSubConnCh - - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], pfBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:1]}}) - sc2 := <-cc.NewSubConnCh - - // Set both subconn to TransientFailure, this will put both sub-balancers in - // transient failure. - bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - bg.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - r, err := p1.Pick(balancer.PickInfo{}) - if err != balancer.ErrTransientFailure { - t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrTransientFailure, r, err) - } - } - - // Set one subconn to Connecting, it shouldn't change the overall state. 
- bg.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - r, err := p2.Pick(balancer.PickInfo{}) - if err != balancer.ErrTransientFailure { - t.Fatalf("want pick to fail with %v, got result %v, err %v", balancer.ErrTransientFailure, r, err) - } - } -} - -func replaceDefaultSubBalancerCloseTimeout(n time.Duration) func() { - old := DefaultSubBalancerCloseTimeout - DefaultSubBalancerCloseTimeout = n - return func() { DefaultSubBalancerCloseTimeout = old } -} - -// initBalancerGroupForCachingTest creates a balancer group, and initialize it -// to be ready for caching tests. -// -// Two rr balancers are added to bg, each with 2 ready subConns. A sub-balancer -// is removed later, so the balancer group returned has one sub-balancer in its -// own map, and one sub-balancer in cache. -func initBalancerGroupForCachingTest(t *testing.T) (*weightedaggregator.Aggregator, *BalancerGroup, *testutils.TestClientConn, map[resolver.Address]balancer.SubConn) { - cc := testutils.NewTestClientConn(t) - gator := weightedaggregator.New(cc, nil, testutils.NewTestWRR) - gator.Start() - bg := New(cc, balancer.BuildOptions{}, gator, nil, nil) - - // Add two balancers to group and send two resolved addresses to both - // balancers. 
- gator.Add(testBalancerIDs[0], 2) - bg.Add(testBalancerIDs[0], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[0:2]}}) - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[2:4]}}) - - bg.Start() - - m1 := make(map[resolver.Address]balancer.SubConn) - for i := 0; i < 4; i++ { - addrs := <-cc.NewSubConnAddrsCh - sc := <-cc.NewSubConnCh - m1[addrs[0]] = sc - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - } - - // Test roundrobin on the last picker. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{ - m1[testBackendAddrs[0]], m1[testBackendAddrs[0]], - m1[testBackendAddrs[1]], m1[testBackendAddrs[1]], - m1[testBackendAddrs[2]], m1[testBackendAddrs[3]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - gator.Remove(testBalancerIDs[1]) - bg.Remove(testBalancerIDs[1]) - gator.BuildAndUpdate() - // Don't wait for SubConns to be removed after close, because they are only - // removed after close timeout. - for i := 0; i < 10; i++ { - select { - case <-cc.RemoveSubConnCh: - t.Fatalf("Got request to remove subconn, want no remove subconn (because subconns were still in cache)") - default: - } - time.Sleep(time.Millisecond) - } - // Test roundrobin on the with only sub-balancer0. 
- p2 := <-cc.NewPickerCh - want = []balancer.SubConn{ - m1[testBackendAddrs[0]], m1[testBackendAddrs[1]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - return gator, bg, cc, m1 -} - -// Test that if a sub-balancer is removed, and re-added within close timeout, -// the subConns won't be re-created. -func (s) TestBalancerGroup_locality_caching(t *testing.T) { - defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() - gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t) - - // Turn down subconn for addr2, shouldn't get picker update because - // sub-balancer1 was removed. - bg.UpdateSubConnState(addrToSC[testBackendAddrs[2]], balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - for i := 0; i < 10; i++ { - select { - case <-cc.NewPickerCh: - t.Fatalf("Got new picker, want no new picker (because the sub-balancer was removed)") - default: - } - time.Sleep(time.Millisecond) - } - - // Sleep, but sleep less then close timeout. - time.Sleep(time.Millisecond * 100) - - // Re-add sub-balancer-1, because subconns were in cache, no new subconns - // should be created. But a new picker will still be generated, with subconn - // states update to date. - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], rrBuilder) - - p3 := <-cc.NewPickerCh - want := []balancer.SubConn{ - addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], - addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], - // addr2 is down, b2 only has addr3 in READY state. 
- addrToSC[testBackendAddrs[3]], addrToSC[testBackendAddrs[3]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - for i := 0; i < 10; i++ { - select { - case <-cc.NewSubConnAddrsCh: - t.Fatalf("Got new subconn, want no new subconn (because subconns were still in cache)") - default: - } - time.Sleep(time.Millisecond * 10) - } -} - -// Sub-balancers are put in cache when they are removed. If balancer group is -// closed within close timeout, all subconns should still be rmeoved -// immediately. -func (s) TestBalancerGroup_locality_caching_close_group(t *testing.T) { - defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() - _, bg, cc, addrToSC := initBalancerGroupForCachingTest(t) - - bg.Close() - // The balancer group is closed. The subconns should be removed immediately. - removeTimeout := time.After(time.Millisecond * 500) - scToRemove := map[balancer.SubConn]int{ - addrToSC[testBackendAddrs[0]]: 1, - addrToSC[testBackendAddrs[1]]: 1, - addrToSC[testBackendAddrs[2]]: 1, - addrToSC[testBackendAddrs[3]]: 1, - } - for i := 0; i < len(scToRemove); i++ { - select { - case sc := <-cc.RemoveSubConnCh: - c := scToRemove[sc] - if c == 0 { - t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) - } - scToRemove[sc] = c - 1 - case <-removeTimeout: - t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") - } - } -} - -// Sub-balancers in cache will be closed if not re-added within timeout, and -// subConns will be removed. -func (s) TestBalancerGroup_locality_caching_not_readd_within_timeout(t *testing.T) { - defer replaceDefaultSubBalancerCloseTimeout(time.Second)() - _, _, cc, addrToSC := initBalancerGroupForCachingTest(t) - - // The sub-balancer is not re-added withtin timeout. The subconns should be - // removed. 
- removeTimeout := time.After(DefaultSubBalancerCloseTimeout) - scToRemove := map[balancer.SubConn]int{ - addrToSC[testBackendAddrs[2]]: 1, - addrToSC[testBackendAddrs[3]]: 1, - } - for i := 0; i < len(scToRemove); i++ { - select { - case sc := <-cc.RemoveSubConnCh: - c := scToRemove[sc] - if c == 0 { - t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) - } - scToRemove[sc] = c - 1 - case <-removeTimeout: - t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") - } - } -} - -// Wrap the rr builder, so it behaves the same, but has a different pointer. -type noopBalancerBuilderWrapper struct { - balancer.Builder -} - -// After removing a sub-balancer, re-add with same ID, but different balancer -// builder. Old subconns should be removed, and new subconns should be created. -func (s) TestBalancerGroup_locality_caching_readd_with_different_builder(t *testing.T) { - defer replaceDefaultSubBalancerCloseTimeout(10 * time.Second)() - gator, bg, cc, addrToSC := initBalancerGroupForCachingTest(t) - - // Re-add sub-balancer-1, but with a different balancer builder. The - // sub-balancer was still in cache, but cann't be reused. This should cause - // old sub-balancer's subconns to be removed immediately, and new subconns - // to be created. - gator.Add(testBalancerIDs[1], 1) - bg.Add(testBalancerIDs[1], &noopBalancerBuilderWrapper{rrBuilder}) - - // The cached sub-balancer should be closed, and the subconns should be - // removed immediately. 
- removeTimeout := time.After(time.Millisecond * 500) - scToRemove := map[balancer.SubConn]int{ - addrToSC[testBackendAddrs[2]]: 1, - addrToSC[testBackendAddrs[3]]: 1, - } - for i := 0; i < len(scToRemove); i++ { - select { - case sc := <-cc.RemoveSubConnCh: - c := scToRemove[sc] - if c == 0 { - t.Fatalf("Got removeSubConn for %v when there's %d remove expected", sc, c) - } - scToRemove[sc] = c - 1 - case <-removeTimeout: - t.Fatalf("timeout waiting for subConns (from balancer in cache) to be removed") - } - } - - bg.UpdateClientConnState(testBalancerIDs[1], balancer.ClientConnState{ResolverState: resolver.State{Addresses: testBackendAddrs[4:6]}}) - - newSCTimeout := time.After(time.Millisecond * 500) - scToAdd := map[resolver.Address]int{ - testBackendAddrs[4]: 1, - testBackendAddrs[5]: 1, - } - for i := 0; i < len(scToAdd); i++ { - select { - case addr := <-cc.NewSubConnAddrsCh: - c := scToAdd[addr[0]] - if c == 0 { - t.Fatalf("Got newSubConn for %v when there's %d new expected", addr, c) - } - scToAdd[addr[0]] = c - 1 - sc := <-cc.NewSubConnCh - addrToSC[addr[0]] = sc - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - case <-newSCTimeout: - t.Fatalf("timeout waiting for subConns (from new sub-balancer) to be newed") - } - } - - // Test roundrobin on the new picker. - p3 := <-cc.NewPickerCh - want := []balancer.SubConn{ - addrToSC[testBackendAddrs[0]], addrToSC[testBackendAddrs[0]], - addrToSC[testBackendAddrs[1]], addrToSC[testBackendAddrs[1]], - addrToSC[testBackendAddrs[4]], addrToSC[testBackendAddrs[5]], - } - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// TestBalancerGroupBuildOptions verifies that the balancer.BuildOptions passed -// to the balancergroup at creation time is passed to child policies. 
-func (s) TestBalancerGroupBuildOptions(t *testing.T) { - const ( - balancerName = "stubBalancer-TestBalancerGroupBuildOptions" - parent = int64(1234) - userAgent = "ua" - defaultTestTimeout = 1 * time.Second - ) - - // Setup the stub balancer such that we can read the build options passed to - // it in the UpdateClientConnState method. - bOpts := balancer.BuildOptions{ - DialCreds: insecure.NewCredentials(), - ChannelzParentID: parent, - CustomUserAgent: userAgent, - } - stub.Register(balancerName, stub.BalancerFuncs{ - UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { - if !cmp.Equal(bd.BuildOptions, bOpts) { - return fmt.Errorf("buildOptions in child balancer: %v, want %v", bd, bOpts) - } - return nil - }, - }) - cc := testutils.NewTestClientConn(t) - bg := New(cc, bOpts, nil, nil, nil) - bg.Start() - - // Add the stub balancer build above as a child policy. - balancerBuilder := balancer.Get(balancerName) - bg.Add(testBalancerIDs[0], balancerBuilder) - - // Send an empty clientConn state change. This should trigger the - // verification of the buildOptions being passed to the child policy. 
- if err := bg.UpdateClientConnState(testBalancerIDs[0], balancer.ClientConnState{}); err != nil { - t.Fatal(err) - } -} diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/xds/internal/balancer/cdsbalancer/cdsbalancer.go index b991981c14c0..bcdeaf681ab5 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -27,70 +27,76 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/internal/balancer/nop" "google.golang.org/grpc/internal/buffer" xdsinternal "google.golang.org/grpc/internal/credentials/xds" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/edsbalancer" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( cdsName = "cds_experimental" - edsName = "eds_experimental" ) var ( - errBalancerClosed = errors.New("cdsBalancer is closed") + errBalancerClosed = errors.New("cds_experimental LB policy is closed") - // newEDSBalancer is a helper function to build a new edsBalancer and will be - // overridden in unittests. - newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { - builder := balancer.Get(edsName) + // newChildBalancer is a helper function to build a new cluster_resolver + // balancer and will be overridden in unittests. 
+ newChildBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { + builder := balancer.Get(clusterresolver.Name) if builder == nil { - return nil, fmt.Errorf("xds: no balancer builder with name %v", edsName) + return nil, fmt.Errorf("xds: no balancer builder with name %v", clusterresolver.Name) } - // We directly pass the parent clientConn to the - // underlying edsBalancer because the cdsBalancer does - // not deal with subConns. + // We directly pass the parent clientConn to the underlying + // cluster_resolver balancer because the cdsBalancer does not deal with + // subConns. return builder.Build(cc, opts), nil } - newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } buildProvider = buildProviderFunc ) func init() { - balancer.Register(cdsBB{}) + balancer.Register(bb{}) } -// cdsBB (short for cdsBalancerBuilder) implements the balancer.Builder -// interface to help build a cdsBalancer. +// bb implements the balancer.Builder interface to help build a cdsBalancer. // It also implements the balancer.ConfigParser interface to help parse the // JSON service config, to be passed to the cdsBalancer. -type cdsBB struct{} +type bb struct{} // Build creates a new CDS balancer with the ClientConn. -func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(clusterresolver.Name) + if builder == nil { + // Shouldn't happen, registered through imported Cluster Resolver, + // defensive programming. + logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", clusterresolver.Name)) + } + crParser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Cluster Resolver builder has this method. 
+ logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name)) + } b := &cdsBalancer{ - bOpts: opts, - updateCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - cancelWatch: func() {}, // No-op at this point. - xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), + bOpts: opts, + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + crParser: crParser, + xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), } b.logger = prefixLogger((b)) b.logger.Infof("Created") - - client, err := newXDSClient() - if err != nil { - b.logger.Errorf("failed to create xds-client: %v", err) - return nil - } - b.xdsClient = client - var creds credentials.TransportCredentials switch { case opts.DialCreds != nil: @@ -102,7 +108,7 @@ func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. b.xdsCredsInUse = true } b.logger.Infof("xDS credentials in use: %v", b.xdsCredsInUse) - + b.clusterHandler = newClusterHandler(b) b.ccw = &ccWrapper{ ClientConn: cc, xdsHI: b.xdsHI, @@ -112,7 +118,7 @@ func (cdsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. } // Name returns the name of balancers built by this builder. -func (cdsBB) Name() string { +func (bb) Name() string { return cdsName } @@ -125,7 +131,7 @@ type lbConfig struct { // ParseConfig parses the JSON load balancer config provided into an // internal form or returns an error if the config is invalid. 
-func (cdsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { var cfg lbConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, fmt.Errorf("xds: unable to unmarshal lbconfig: %s, error: %v", string(c), err) @@ -133,54 +139,41 @@ func (cdsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, return &cfg, nil } -// xdsClientInterface contains methods from xdsClient.Client which are used by -// the cdsBalancer. This will be faked out in unittests. -type xdsClientInterface interface { - WatchCluster(string, func(xdsclient.ClusterUpdate, error)) func() - BootstrapConfig() *bootstrap.Config - Close() -} - // ccUpdate wraps a clientConn update received from gRPC (pushed from the // xdsResolver). A valid clusterName causes the cdsBalancer to register a CDS // watcher with the xdsClient, while a non-nil error causes it to cancel the -// existing watch and propagate the error to the underlying edsBalancer. +// existing watch and propagate the error to the underlying cluster_resolver +// balancer. type ccUpdate struct { clusterName string err error } // scUpdate wraps a subConn update received from gRPC. This is directly passed -// on to the edsBalancer. +// on to the cluster_resolver balancer. type scUpdate struct { subConn balancer.SubConn state balancer.SubConnState } -// watchUpdate wraps the information received from a registered CDS watcher. A -// non-nil error is propagated to the underlying edsBalancer. A valid update -// results in creating a new edsBalancer (if one doesn't already exist) and -// pushing the update to it. -type watchUpdate struct { - cds xdsclient.ClusterUpdate - err error -} +type exitIdle struct{} -// cdsBalancer implements a CDS based LB policy. It instantiates an EDS based -// LB policy to further resolve the serviceName received from CDS, into -// localities and endpoints. 
Implements the balancer.Balancer interface which -// is exposed to gRPC and implements the balancer.ClientConn interface which is -// exposed to the edsBalancer. +// cdsBalancer implements a CDS based LB policy. It instantiates a +// cluster_resolver balancer to further resolve the serviceName received from +// CDS, into localities and endpoints. Implements the balancer.Balancer +// interface which is exposed to gRPC and implements the balancer.ClientConn +// interface which is exposed to the cluster_resolver balancer. type cdsBalancer struct { ccw *ccWrapper // ClientConn interface passed to child LB. bOpts balancer.BuildOptions // BuildOptions passed to child LB. updateCh *buffer.Unbounded // Channel for gRPC and xdsClient updates. - xdsClient xdsClientInterface // xDS client to watch Cluster resource. - cancelWatch func() // Cluster watch cancel func. - edsLB balancer.Balancer // EDS child policy. - clusterToWatch string + xdsClient xdsclient.XDSClient // xDS client to watch Cluster resource. + clusterHandler *clusterHandler // To watch the clusters. + childLB balancer.Balancer logger *grpclog.PrefixLogger closed *grpcsync.Event + done *grpcsync.Event + crParser balancer.ConfigParser // The certificate providers are cached here to that they can be closed when // a new provider is to be created. @@ -193,32 +186,22 @@ type cdsBalancer struct { // handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good // updates lead to registration of a CDS watch. Updates with error lead to // cancellation of existing watch and propagation of the same error to the -// edsBalancer. +// cluster_resolver balancer. func (b *cdsBalancer) handleClientConnUpdate(update *ccUpdate) { // We first handle errors, if any, and then proceed with handling the // update, only if the status quo has changed. 
if err := update.err; err != nil { b.handleErrorFromUpdate(err, true) - } - if b.clusterToWatch == update.clusterName { return } - if update.clusterName != "" { - cancelWatch := b.xdsClient.WatchCluster(update.clusterName, b.handleClusterUpdate) - b.logger.Infof("Watch started on resource name %v with xds-client %p", update.clusterName, b.xdsClient) - b.cancelWatch = func() { - cancelWatch() - b.logger.Infof("Watch cancelled on resource name %v with xds-client %p", update.clusterName, b.xdsClient) - } - b.clusterToWatch = update.clusterName - } + b.clusterHandler.updateRootCluster(update.clusterName) } // handleSecurityConfig processes the security configuration received from the // management server, creates appropriate certificate provider plugins, and // updates the HandhakeInfo which is added as an address attribute in // NewSubConn() calls. -func (b *cdsBalancer) handleSecurityConfig(config *xdsclient.SecurityConfig) error { +func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) error { // If xdsCredentials are not in use, i.e, the user did not want to get // security configuration from an xDS server, we should not be acting on the // received security config here. Doing so poses a security threat. @@ -303,58 +286,118 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc } // handleWatchUpdate handles a watch update from the xDS Client. Good updates -// lead to clientConn updates being invoked on the underlying edsBalancer. -func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { +// lead to clientConn updates being invoked on the underlying cluster_resolver balancer. 
+func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { if err := update.err; err != nil { b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) b.handleErrorFromUpdate(err, false) return } - b.logger.Infof("Watch update from xds-client %p, content: %+v", b.xdsClient, update.cds) + b.logger.Infof("Received Cluster resource contains content: %s, security config: %s", pretty.ToJSON(update.updates), pretty.ToJSON(update.securityCfg)) // Process the security config from the received update before building the // child policy or forwarding the update to it. We do this because the child // policy may try to create a new subConn inline. Processing the security // configuration here and setting up the handshakeInfo will make sure that // such attempts are handled properly. - if err := b.handleSecurityConfig(update.cds.SecurityCfg); err != nil { + if err := b.handleSecurityConfig(update.securityCfg); err != nil { // If the security config is invalid, for example, if the provider // instance is not found in the bootstrap config, we need to put the // channel in transient failure. - b.logger.Warningf("Invalid security config update from xds-client %p: %v", b.xdsClient, err) + b.logger.Warningf("Received Cluster resource contains invalid security config: %v", err) b.handleErrorFromUpdate(err, false) return } // The first good update from the watch API leads to the instantiation of an - // edsBalancer. Further updates/errors are propagated to the existing - // edsBalancer. - if b.edsLB == nil { - edsLB, err := newEDSBalancer(b.ccw, b.bOpts) + // cluster_resolver balancer. Further updates/errors are propagated to the existing + // cluster_resolver balancer. 
+ if b.childLB == nil { + childLB, err := newChildBalancer(b.ccw, b.bOpts) if err != nil { - b.logger.Errorf("Failed to create child policy of type %s, %v", edsName, err) + b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err) return } - b.edsLB = edsLB - b.logger.Infof("Created child policy %p of type %s", b.edsLB, edsName) + b.childLB = childLB + b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name) + } + + dms := make([]clusterresolver.DiscoveryMechanism, len(update.updates)) + for i, cu := range update.updates { + switch cu.ClusterType { + case xdsresource.ClusterTypeEDS: + dms[i] = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeEDS, + Cluster: cu.ClusterName, + EDSServiceName: cu.EDSServiceName, + MaxConcurrentRequests: cu.MaxRequests, + } + if cu.LRSServerConfig == xdsresource.ClusterLRSServerSelf { + bootstrapConfig := b.xdsClient.BootstrapConfig() + parsedName := xdsresource.ParseName(cu.ClusterName) + if parsedName.Scheme == xdsresource.FederationScheme { + // Is a federation resource name, find the corresponding + // authority server config. + if cfg, ok := bootstrapConfig.Authorities[parsedName.Authority]; ok { + dms[i].LoadReportingServer = cfg.XDSServer + } + } else { + // Not a federation resource name, use the default + // authority. 
+ dms[i].LoadReportingServer = bootstrapConfig.XDSServer + } + } + case xdsresource.ClusterTypeLogicalDNS: + dms[i] = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, + Cluster: cu.ClusterName, + DNSHostname: cu.DNSHostName, + } + default: + b.logger.Infof("Unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) + } + if envconfig.XDSOutlierDetection { + odJSON := cu.OutlierDetection + // "In the cds LB policy, if the outlier_detection field is not set in + // the Cluster resource, a "no-op" outlier_detection config will be + // generated in the corresponding DiscoveryMechanism config, with all + // fields unset." - A50 + if odJSON == nil { + // This will pick up top level defaults in Cluster Resolver + // ParseConfig, but sre and fpe will be nil still so still a + // "no-op" config. + odJSON = json.RawMessage(`{}`) + } + dms[i].OutlierDetection = odJSON + } } - lbCfg := &edsbalancer.EDSConfig{ - EDSServiceName: update.cds.ServiceName, - MaxConcurrentRequests: update.cds.MaxRequests, + + // Prepare Cluster Resolver config, marshal into JSON, and then Parse it to + // get configuration to send downward to Cluster Resolver. + lbCfg := &clusterresolver.LBConfig{ + DiscoveryMechanisms: dms, + XDSLBPolicy: update.lbPolicy, + } + crLBCfgJSON, err := json.Marshal(lbCfg) + if err != nil { + // Shouldn't happen, since we just prepared struct. + b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", lbCfg) + return } - if update.cds.EnableLRS { - // An empty string here indicates that the edsBalancer should use the - // same xDS server for load reporting as it does for EDS - // requests/responses. 
- lbCfg.LrsLoadReportingServerName = new(string) + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.crParser.ParseConfig(crLBCfgJSON); err != nil { + b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", crLBCfgJSON, err) + return } + ccState := balancer.ClientConnState{ - BalancerConfig: lbCfg, + ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), + BalancerConfig: sc, } - if err := b.edsLB.UpdateClientConnState(ccState); err != nil { - b.logger.Errorf("xds: edsBalancer.UpdateClientConnState(%+v) returned error: %v", ccState, err) + if err := b.childLB.UpdateClientConnState(ccState); err != nil { + b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err) } } @@ -364,36 +407,52 @@ func (b *cdsBalancer) handleWatchUpdate(update *watchUpdate) { func (b *cdsBalancer) run() { for { select { - case u := <-b.updateCh.Get(): + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } b.updateCh.Load() switch update := u.(type) { case *ccUpdate: b.handleClientConnUpdate(update) case *scUpdate: // SubConn updates are passthrough and are simply handed over to - // the underlying edsBalancer. - if b.edsLB == nil { - b.logger.Errorf("xds: received scUpdate {%+v} with no edsBalancer", update) + // the underlying cluster_resolver balancer. + if b.childLB == nil { + b.logger.Errorf("Received SubConn update with no child policy: %+v", update) + break + } + b.childLB.UpdateSubConnState(update.subConn, update.state) + case exitIdle: + if b.childLB == nil { + b.logger.Errorf("Received ExitIdle with no child policy") break } - b.edsLB.UpdateSubConnState(update.subConn, update.state) - case *watchUpdate: - b.handleWatchUpdate(update) + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. 
+ if ei, ok := b.childLB.(balancer.ExitIdler); ok { + ei.ExitIdle() + } } - - // Close results in cancellation of the CDS watch and closing of the - // underlying edsBalancer and is the only way to exit this goroutine. + case u := <-b.clusterHandler.updateChannel: + b.handleWatchUpdate(u) case <-b.closed.Done(): - b.cancelWatch() - b.cancelWatch = func() {} - - if b.edsLB != nil { - b.edsLB.Close() - b.edsLB = nil + b.clusterHandler.close() + if b.childLB != nil { + b.childLB.Close() + b.childLB = nil } - b.xdsClient.Close() - // This is the *ONLY* point of return from this function. + if b.cachedRoot != nil { + b.cachedRoot.Close() + } + if b.cachedIdentity != nil { + b.cachedIdentity.Close() + } + b.updateCh.Close() b.logger.Infof("Shutdown") + b.done.Fire() return } } @@ -412,23 +471,22 @@ func (b *cdsBalancer) run() { // - If it's from xds client, it means CDS resource were removed. The CDS // watcher should keep watching. // -// In both cases, the error will be forwarded to EDS balancer. And if error is -// resource-not-found, the child EDS balancer will stop watching EDS. +// In both cases, the error will be forwarded to the child balancer. And if +// error is resource-not-found, the child balancer will stop watching EDS. func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { - // TODO: connection errors will be sent to the eds balancers directly, and - // also forwarded by the parent balancers/resolvers. So the eds balancer may - // see the same error multiple times. We way want to only forward the error - // to eds if it's not a connection error. - // // This is not necessary today, because xds client never sends connection // errors. 
- if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { - b.cancelWatch() - } - if b.edsLB != nil { - b.edsLB.ResolverError(err) + if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { + b.clusterHandler.close() + } + if b.childLB != nil { + if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { + // Connection errors will be sent to the child balancers directly. + // There's no need to forward them. + b.childLB.ResolverError(err) + } } else { - // If eds balancer was never created, fail the RPCs with + // If child balancer was never created, fail the RPCs with // errors. b.ccw.UpdateState(balancer.State{ ConnectivityState: connectivity.TransientFailure, @@ -437,36 +495,34 @@ func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { } } -// handleClusterUpdate is the CDS watch API callback. It simply pushes the -// received information on to the update channel for run() to pick it up. -func (b *cdsBalancer) handleClusterUpdate(cu xdsclient.ClusterUpdate, err error) { - if b.closed.HasFired() { - b.logger.Warningf("xds: received cluster update {%+v} after cdsBalancer was closed", cu) - return - } - b.updateCh.Put(&watchUpdate{cds: cu, err: err}) -} - // UpdateClientConnState receives the serviceConfig (which contains the // clusterName to watch for in CDS) and the xdsClient object from the // xdsResolver. 
func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { if b.closed.HasFired() { - b.logger.Warningf("xds: received ClientConnState {%+v} after cdsBalancer was closed", state) + b.logger.Errorf("Received balancer config after close") return errBalancerClosed } - b.logger.Infof("Received update from resolver, balancer config: %+v", state.BalancerConfig) + if b.xdsClient == nil { + c := xdsclient.FromResolverState(state.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.xdsClient = c + } + b.logger.Infof("Received balancer config update: %s", pretty.ToJSON(state.BalancerConfig)) + // The errors checked here should ideally never happen because the // ServiceConfig in this case is prepared by the xdsResolver and is not // something that is received on the wire. lbCfg, ok := state.BalancerConfig.(*lbConfig) if !ok { - b.logger.Warningf("xds: unexpected LoadBalancingConfig type: %T", state.BalancerConfig) + b.logger.Warningf("Received unexpected balancer config type: %T", state.BalancerConfig) return balancer.ErrBadResolverState } if lbCfg.ClusterName == "" { - b.logger.Warningf("xds: no clusterName found in LoadBalancingConfig: %+v", lbCfg) + b.logger.Warningf("Received balancer config with no cluster name") return balancer.ErrBadResolverState } b.updateCh.Put(&ccUpdate{clusterName: lbCfg.ClusterName}) @@ -476,7 +532,7 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro // ResolverError handles errors reported by the xdsResolver. func (b *cdsBalancer) ResolverError(err error) { if b.closed.HasFired() { - b.logger.Warningf("xds: received resolver error {%v} after cdsBalancer was closed", err) + b.logger.Warningf("Received resolver error after close: %v", err) return } b.updateCh.Put(&ccUpdate{err: err}) @@ -485,15 +541,21 @@ func (b *cdsBalancer) ResolverError(err error) { // UpdateSubConnState handles subConn updates from gRPC. 
func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { if b.closed.HasFired() { - b.logger.Warningf("xds: received subConn update {%v, %v} after cdsBalancer was closed", sc, state) + b.logger.Warningf("Received subConn update after close: {%v, %v}", sc, state) return } b.updateCh.Put(&scUpdate{subConn: sc, state: state}) } -// Close closes the cdsBalancer and the underlying edsBalancer. +// Close cancels the CDS watch, closes the child policy and closes the +// cdsBalancer. func (b *cdsBalancer) Close() { b.closed.Fire() + <-b.done.Done() +} + +func (b *cdsBalancer) ExitIdle() { + b.updateCh.Put(exitIdle{}) } // ccWrapper wraps the balancer.ClientConn passed to the CDS balancer at diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go index 73459dd64101..fcd2e26960c0 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_security_test.go @@ -32,12 +32,11 @@ import ( "google.golang.org/grpc/internal" xdscredsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/testutils" - xdsinternal "google.golang.org/grpc/internal/xds" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/resolver" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -48,26 +47,27 @@ const ( ) var ( - testSANMatchers = []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(newStringP(testSAN), nil, nil, nil, nil, true), - xdsinternal.StringMatcherForTesting(nil, newStringP(testSAN), nil, nil, nil, false), - 
xdsinternal.StringMatcherForTesting(nil, nil, newStringP(testSAN), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(testSAN), false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP(testSAN), nil, false), + testSANMatchers = []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP(testSAN), nil, nil, nil, nil, true), + matcher.StringMatcherForTesting(nil, newStringP(testSAN), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP(testSAN), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, regexp.MustCompile(testSAN), false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP(testSAN), nil, false), } fpb1, fpb2 *fakeProviderBuilder bootstrapConfig *bootstrap.Config - cdsUpdateWithGoodSecurityCfg = xdsclient.ClusterUpdate{ - ServiceName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + cdsUpdateWithGoodSecurityCfg = xdsresource.ClusterUpdate{ + ClusterName: serviceName, + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "default1", IdentityInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, }, + LBPolicy: wrrLocalityLBConfigJSON, } - cdsUpdateWithMissingSecurityCfg = xdsclient.ClusterUpdate{ - ServiceName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + cdsUpdateWithMissingSecurityCfg = xdsresource.ClusterUpdate{ + ClusterName: serviceName, + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "not-default", }, } @@ -129,13 +129,9 @@ func (p *fakeProvider) Close() { // setupWithXDSCreds performs all the setup steps required for tests which use // xDSCredentials. 
-func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *xdstestutils.TestClientConn, func()) { +func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *testutils.TestClientConn, func()) { t.Helper() - xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } - builder := balancer.Get(cdsName) if builder == nil { t.Fatalf("balancer.Get(%q) returned nil", cdsName) @@ -149,20 +145,20 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS } // Create a new CDS balancer and pass it a fake balancer.ClientConn which we // can use to inspect the different calls made by the balancer. - tcc := xdstestutils.NewTestClientConn(t) + tcc := testutils.NewTestClientConn(t) cdsB := builder.Build(tcc, balancer.BuildOptions{DialCreds: creds}) // Override the creation of the EDS balancer to return a fake EDS balancer // implementation. edsB := newTestEDSBalancer() - oldEDSBalancerBuilder := newEDSBalancer - newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { + oldEDSBalancerBuilder := newChildBalancer + newChildBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { edsB.parentCC = cc return edsB, nil } // Push a ClientConnState update to the CDS balancer with a cluster name. 
- if err := cdsB.UpdateClientConnState(cdsCCS(clusterName)); err != nil { + if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, xdsC)); err != nil { t.Fatalf("cdsBalancer.UpdateClientConnState failed with error: %v", err) } @@ -179,8 +175,7 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS } return xdsC, cdsB.(*cdsBalancer), edsB, tcc, func() { - newXDSClient = oldNewXDSClient - newEDSBalancer = oldEDSBalancerBuilder + newChildBalancer = oldEDSBalancerBuilder } } @@ -188,7 +183,7 @@ func setupWithXDSCreds(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDS // passed to the EDS balancer, and verifies that the CDS balancer forwards the // call appropriately to its parent balancer.ClientConn with or without // attributes bases on the value of wantFallback. -func makeNewSubConn(ctx context.Context, edsCC balancer.ClientConn, parentCC *xdstestutils.TestClientConn, wantFallback bool) (balancer.SubConn, error) { +func makeNewSubConn(ctx context.Context, edsCC balancer.ClientConn, parentCC *testutils.TestClientConn, wantFallback bool) (balancer.SubConn, error) { dummyAddr := "foo-address" addrs := []resolver.Address{{Addr: dummyAddr}} sc, err := edsCC.NewSubConn(addrs, balancer.NewSubConnOptions{}) @@ -253,9 +248,12 @@ func (s) TestSecurityConfigWithoutXDSCreds(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + // newChildBalancer function as part of test setup. 
+ cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -308,10 +306,13 @@ func (s) TestNoSecurityConfigWithXDSCreds(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. No security config is + // newChildBalancer function as part of test setup. No security config is // passed to the CDS balancer as part of this update. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -466,8 +467,8 @@ func (s) TestSecurityConfigUpdate_BadToGood(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false) + // newChildBalancer function as part of test setup. 
+ wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -500,8 +501,8 @@ func (s) TestGoodSecurityConfig(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false) + // newChildBalancer function as part of test setup. + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -553,8 +554,8 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false) + // newChildBalancer function as part of test setup. + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -570,7 +571,10 @@ func (s) TestSecurityConfigUpdate_GoodToFallback(t *testing.T) { // an update which contains bad security config. So, we expect the CDS // balancer to forward this error to the EDS balancer and eventually the // channel needs to be put in a bad state. 
- cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -603,8 +607,8 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. - wantCCS := edsCCS(serviceName, nil, false) + // newChildBalancer function as part of test setup. + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdateWithGoodSecurityCfg, nil}, wantCCS, edsB); err != nil { @@ -638,7 +642,7 @@ func (s) TestSecurityConfigUpdate_GoodToBad(t *testing.T) { // registered watch should not be cancelled. sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a non-resource-not-found-error") } } @@ -674,15 +678,16 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. 
- cdsUpdate := xdsclient.ClusterUpdate{ - ServiceName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + // newChildBalancer function as part of test setup. + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "default1", SubjectAltNameMatchers: testSANMatchers, }, + LBPolicy: wrrLocalityLBConfigJSON, } - wantCCS := edsCCS(serviceName, nil, false) + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -700,12 +705,13 @@ func (s) TestSecurityConfigUpdate_GoodToGood(t *testing.T) { } // Push another update with a new security configuration. - cdsUpdate = xdsclient.ClusterUpdate{ - ServiceName: serviceName, - SecurityCfg: &xdsclient.SecurityConfig{ + cdsUpdate = xdsresource.ClusterUpdate{ + ClusterName: serviceName, + SecurityCfg: &xdsresource.SecurityConfig{ RootInstanceName: "default2", SubjectAltNameMatchers: testSANMatchers, }, + LBPolicy: wrrLocalityLBConfigJSON, } if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) diff --git a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go index 9c7bc2362ab7..19e937536917 100644 --- a/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go +++ b/xds/internal/balancer/cdsbalancer/cdsbalancer_test.go @@ -26,19 +26,22 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc/attributes" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpctest" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" 
"google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/edsbalancer" - "google.golang.org/grpc/xds/internal/client" - xdsclient "google.golang.org/grpc/xds/internal/client" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/balancer/clusterresolver" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( @@ -48,6 +51,31 @@ const ( defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. ) +var ( + defaultTestAuthorityServerConfig = &bootstrap.ServerConfig{ + ServerURI: "self_server", + Creds: bootstrap.ChannelCreds{ + Type: "insecure", + }, + } + noopODLBCfg = outlierdetection.LBConfig{} + noopODLBCfgJSON, _ = json.Marshal(noopODLBCfg) + wrrLocalityLBConfig = &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + } + wrrLocalityLBConfigJSON, _ = json.Marshal(wrrLocalityLBConfig) + ringHashLBConfig = &internalserviceconfig.BalancerConfig{ + Name: ringhash.Name, + Config: &ringhash.LBConfig{MinRingSize: 10, MaxRingSize: 100}, + } + ringHashLBConfigJSON, _ = json.Marshal(ringHashLBConfig) +) + type s struct { grpctest.Tester } @@ -58,7 +86,7 @@ func Test(t *testing.T) { // cdsWatchInfo wraps the update and the error sent in a CDS watch callback. 
type cdsWatchInfo struct { - update xdsclient.ClusterUpdate + update xdsresource.ClusterUpdate err error } @@ -83,7 +111,8 @@ type testEDSBalancer struct { // resolverErrCh is a channel used to signal a resolver error. resolverErrCh *testutils.Channel // closeCh is a channel used to signal the closing of this balancer. - closeCh *testutils.Channel + closeCh *testutils.Channel + exitIdleCh *testutils.Channel // parentCC is the balancer.ClientConn passed to this test balancer as part // of the Build() call. parentCC balancer.ClientConn @@ -100,6 +129,7 @@ func newTestEDSBalancer() *testEDSBalancer { scStateCh: testutils.NewChannel(), resolverErrCh: testutils.NewChannel(), closeCh: testutils.NewChannel(), + exitIdleCh: testutils.NewChannel(), } } @@ -120,6 +150,10 @@ func (tb *testEDSBalancer) Close() { tb.closeCh.Send(struct{}{}) } +func (tb *testEDSBalancer) ExitIdle() { + tb.exitIdleCh.Send(struct{}{}) +} + // waitForClientConnUpdate verifies if the testEDSBalancer receives the // provided ClientConnState within a reasonable amount of time. func (tb *testEDSBalancer) waitForClientConnUpdate(ctx context.Context, wantCCS balancer.ClientConnState) error { @@ -128,8 +162,15 @@ func (tb *testEDSBalancer) waitForClientConnUpdate(ctx context.Context, wantCCS return err } gotCCS := ccs.(balancer.ClientConnState) - if !cmp.Equal(gotCCS, wantCCS, cmpopts.IgnoreUnexported(attributes.Attributes{})) { - return fmt.Errorf("received ClientConnState: %+v, want %+v", gotCCS, wantCCS) + if xdsclient.FromResolverState(gotCCS.ResolverState) == nil { + return fmt.Errorf("want resolver state with XDSClient attached, got one without") + } + + // Calls into Cluster Resolver LB Config Equal(), which ignores JSON + // configuration but compares the Parsed Configuration of the JSON fields + // emitted from ParseConfig() on the cluster resolver. 
+ if diff := cmp.Diff(gotCCS, wantCCS, cmpopts.IgnoreFields(resolver.State{}, "Attributes"), cmp.AllowUnexported(clusterresolver.LBConfig{})); diff != "" { + return fmt.Errorf("received unexpected ClientConnState, diff (-got +want): %v", diff) } return nil } @@ -172,7 +213,7 @@ func (tb *testEDSBalancer) waitForClose(ctx context.Context) error { // cdsCCS is a helper function to construct a good update passed from the // xdsResolver to the cdsBalancer. -func cdsCCS(cluster string) balancer.ClientConnState { +func cdsCCS(cluster string, xdsC xdsclient.XDSClient) balancer.ClientConnState { const cdsLBConfig = `{ "loadBalancingConfig":[ { @@ -184,64 +225,96 @@ func cdsCCS(cluster string) balancer.ClientConnState { }` jsonSC := fmt.Sprintf(cdsLBConfig, cluster) return balancer.ClientConnState{ - ResolverState: resolver.State{ - ServiceConfig: internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(jsonSC), - }, + ResolverState: xdsclient.SetClient(resolver.State{ + ServiceConfig: internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC), + }, xdsC), BalancerConfig: &lbConfig{ClusterName: clusterName}, } } -// edsCCS is a helper function to construct a good update passed from the -// cdsBalancer to the edsBalancer. -func edsCCS(service string, countMax *uint32, enableLRS bool) balancer.ClientConnState { - lbCfg := &edsbalancer.EDSConfig{ - EDSServiceName: service, +// edsCCS is a helper function to construct a Client Conn update which +// represents what the CDS Balancer passes to the Cluster Resolver. It calls +// into Cluster Resolver's ParseConfig to get the service config to fill out the +// Client Conn State. This is to fill out unexported parts of the Cluster +// Resolver config struct. Returns an empty Client Conn State if it encounters +// an error building out the Client Conn State. 
+func edsCCS(service string, countMax *uint32, enableLRS bool, xdslbpolicy json.RawMessage, odConfig json.RawMessage) balancer.ClientConnState { + builder := balancer.Get(clusterresolver.Name) + if builder == nil { + // Shouldn't happen, registered through imported Cluster Resolver, + // defensive programming. + logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name) + return balancer.ClientConnState{} // will fail the calling test eventually through error in diff. + } + crParser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Cluster Resolver builder has this method. + logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name) + return balancer.ClientConnState{} + } + discoveryMechanism := clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeEDS, + Cluster: service, MaxConcurrentRequests: countMax, + OutlierDetection: odConfig, } if enableLRS { - lbCfg.LrsLoadReportingServerName = new(string) + discoveryMechanism.LoadReportingServer = defaultTestAuthorityServerConfig } + lbCfg := &clusterresolver.LBConfig{ + DiscoveryMechanisms: []clusterresolver.DiscoveryMechanism{discoveryMechanism}, + XDSLBPolicy: xdslbpolicy, + } + + crLBCfgJSON, err := json.Marshal(lbCfg) + if err != nil { + // Shouldn't happen, since we just prepared struct. + logger.Errorf("cds_balancer: error marshalling prepared config: %v", lbCfg) + return balancer.ClientConnState{} + } + + var sc serviceconfig.LoadBalancingConfig + if sc, err = crParser.ParseConfig(crLBCfgJSON); err != nil { + logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", crLBCfgJSON, err) + return balancer.ClientConnState{} + } + return balancer.ClientConnState{ - BalancerConfig: lbCfg, + BalancerConfig: sc, } } // setup creates a cdsBalancer and an edsBalancer (and overrides the -// newEDSBalancer function to return it), and also returns a cleanup function. 
-func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *xdstestutils.TestClientConn, func()) { +// newChildBalancer function to return it), and also returns a cleanup function. +func setup(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *testutils.TestClientConn, func()) { t.Helper() - xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } - builder := balancer.Get(cdsName) if builder == nil { t.Fatalf("balancer.Get(%q) returned nil", cdsName) } - tcc := xdstestutils.NewTestClientConn(t) + tcc := testutils.NewTestClientConn(t) cdsB := builder.Build(tcc, balancer.BuildOptions{}) edsB := newTestEDSBalancer() - oldEDSBalancerBuilder := newEDSBalancer - newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { + oldEDSBalancerBuilder := newChildBalancer + newChildBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions) (balancer.Balancer, error) { edsB.parentCC = cc return edsB, nil } return xdsC, cdsB.(*cdsBalancer), edsB, tcc, func() { - newEDSBalancer = oldEDSBalancerBuilder - newXDSClient = oldNewXDSClient + newChildBalancer = oldEDSBalancerBuilder } } // setupWithWatch does everything that setup does, and also pushes a ClientConn // update to the cdsBalancer and waits for a CDS watch call to be registered. 
-func setupWithWatch(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *xdstestutils.TestClientConn, func()) { +func setupWithWatch(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBalancer, *testutils.TestClientConn, func()) { t.Helper() xdsC, cdsB, edsB, tcc, cancel := setup(t) - if err := cdsB.UpdateClientConnState(cdsCCS(clusterName)); err != nil { + if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, xdsC)); err != nil { t.Fatalf("cdsBalancer.UpdateClientConnState failed with error: %v", err) } @@ -261,6 +334,8 @@ func setupWithWatch(t *testing.T) (*fakeclient.Client, *cdsBalancer, *testEDSBal // cdsBalancer with different inputs and verifies that the CDS watch API on the // provided xdsClient is invoked appropriately. func (s) TestUpdateClientConnState(t *testing.T) { + xdsC := fakeclient.NewClient() + tests := []struct { name string ccs balancer.ClientConnState @@ -279,14 +354,14 @@ func (s) TestUpdateClientConnState(t *testing.T) { }, { name: "happy-good-case", - ccs: cdsCCS(clusterName), + ccs: cdsCCS(clusterName, xdsC), wantCluster: clusterName, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - xdsC, cdsB, _, _, cancel := setup(t) + _, cdsB, _, _, cancel := setup(t) defer func() { cancel() cdsB.Close() @@ -323,7 +398,7 @@ func (s) TestUpdateClientConnStateWithSameState(t *testing.T) { }() // This is the same clientConn update sent in setupWithWatch(). - if err := cdsB.UpdateClientConnState(cdsCCS(clusterName)); err != nil { + if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, xdsC)); err != nil { t.Fatalf("cdsBalancer.UpdateClientConnState failed with error: %v", err) } // The above update should not result in a new watch being registered. @@ -339,6 +414,9 @@ func (s) TestUpdateClientConnStateWithSameState(t *testing.T) { // to the edsBalancer. 
func (s) TestHandleClusterUpdate(t *testing.T) { xdsC, cdsB, edsB, _, cancel := setupWithWatch(t) + xdsC.SetBootstrapConfig(&bootstrap.Config{ + XDSServer: defaultTestAuthorityServerConfig, + }) defer func() { cancel() cdsB.Close() @@ -346,19 +424,91 @@ func (s) TestHandleClusterUpdate(t *testing.T) { tests := []struct { name string - cdsUpdate xdsclient.ClusterUpdate + cdsUpdate xdsresource.ClusterUpdate updateErr error wantCCS balancer.ClientConnState }{ { - name: "happy-case-with-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ServiceName: serviceName, EnableLRS: true}, - wantCCS: edsCCS(serviceName, nil, true), + name: "happy-case-with-lrs", + cdsUpdate: xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LRSServerConfig: xdsresource.ClusterLRSServerSelf, + LBPolicy: wrrLocalityLBConfigJSON, + }, + wantCCS: edsCCS(serviceName, nil, true, wrrLocalityLBConfigJSON, noopODLBCfgJSON), + }, + { + name: "happy-case-without-lrs", + cdsUpdate: xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + }, + wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON), + }, + { + name: "happy-case-with-ring-hash-lb-policy", + cdsUpdate: xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: ringHashLBConfigJSON, + }, + wantCCS: edsCCS(serviceName, nil, false, ringHashLBConfigJSON, noopODLBCfgJSON), + }, + { + name: "happy-case-outlier-detection-xds-defaults", + // i.e. 
od proto set but no proto fields set + cdsUpdate: xdsresource.ClusterUpdate{ + ClusterName: serviceName, + OutlierDetection: json.RawMessage(`{ + "successRateEjection": {} + }`), + LBPolicy: wrrLocalityLBConfigJSON, + }, + wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, json.RawMessage(`{ + "successRateEjection": {} + }`)), }, { - name: "happy-case-without-lrs", - cdsUpdate: xdsclient.ClusterUpdate{ServiceName: serviceName}, - wantCCS: edsCCS(serviceName, nil, false), + name: "happy-case-outlier-detection-all-fields-set", + cdsUpdate: xdsresource.ClusterUpdate{ + ClusterName: serviceName, + OutlierDetection: json.RawMessage(`{ + "interval": "10s", + "baseEjectionTime": "30s", + "maxEjectionTime": "300s", + "maxEjectionPercent": 10, + "successRateEjection": { + "stdevFactor": 1900, + "enforcementPercentage": 100, + "minimumHosts": 5, + "requestVolume": 100 + }, + "failurePercentageEjection": { + "threshold": 85, + "enforcementPercentage": 5, + "minimumHosts": 5, + "requestVolume": 50 + } + }`), + LBPolicy: wrrLocalityLBConfigJSON, + }, + wantCCS: edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, json.RawMessage(`{ + "interval": "10s", + "baseEjectionTime": "30s", + "maxEjectionTime": "300s", + "maxEjectionPercent": 10, + "successRateEjection": { + "stdevFactor": 1900, + "enforcementPercentage": 100, + "minimumHosts": 5, + "requestVolume": 100 + }, + "failurePercentageEjection": { + "threshold": 85, + "enforcementPercentage": 5, + "minimumHosts": 5, + "requestVolume": 50 + } + }`)), }, } @@ -391,13 +541,13 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // resolver error at this point should result in the CDS balancer returning // an error picker. 
watcherErr := errors.New("cdsBalancer watcher error") - xdsC.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, watcherErr) + xdsC.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, watcherErr) // Since the error being pushed here is not a resource-not-found-error, the // registered watch should not be cancelled. sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a non-resource-not-found-error") } // The CDS balancer has not yet created an EDS balancer. So, this resolver @@ -424,19 +574,22 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + // newChildBalancer function as part of test setup. + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } // Again push a non-resource-not-found-error through the watcher callback. - xdsC.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, watcherErr) + xdsC.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, watcherErr) // Make sure the registered watch is not cancelled. 
sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a non-resource-not-found-error") } // Make sure the error is forwarded to the EDS balancer. @@ -445,13 +598,13 @@ func (s) TestHandleClusterUpdateError(t *testing.T) { } // Push a resource-not-found-error this time around. - resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") - xdsC.InvokeWatchClusterCallback(xdsclient.ClusterUpdate{}, resourceErr) + resourceErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") + xdsC.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, resourceErr) // Make sure that the watch is not cancelled. This error indicates that the // request cluster resource is not found. We should continue to watch it. sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a resource-not-found-error") } // Make sure the error is forwarded to the EDS balancer. @@ -483,7 +636,7 @@ func (s) TestResolverError(t *testing.T) { // registered watch should not be cancelled. sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a non-resource-not-found-error") } // The CDS balancer has not yet created an EDS balancer. 
So, this resolver @@ -509,9 +662,12 @@ func (s) TestResolverError(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + // newChildBalancer function as part of test setup. + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { t.Fatal(err) } @@ -521,7 +677,7 @@ func (s) TestResolverError(t *testing.T) { // Make sure the registered watch is not cancelled. sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelClusterWatch(sCtx); err != context.DeadlineExceeded { t.Fatal("cluster watch cancelled for a non-resource-not-found-error") } // Make sure the error is forwarded to the EDS balancer. @@ -530,10 +686,10 @@ func (s) TestResolverError(t *testing.T) { } // Push a resource-not-found-error this time around. - resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") + resourceErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "cdsBalancer resource not found error") cdsB.ResolverError(resourceErr) // Make sure the registered watch is cancelled. 
- if err := xdsC.WaitForCancelClusterWatch(ctx); err != nil { + if _, err := xdsC.WaitForCancelClusterWatch(ctx); err != nil { t.Fatalf("want watch to be canceled, watchForCancel failed: %v", err) } // Make sure the error is forwarded to the EDS balancer. @@ -558,9 +714,12 @@ func (s) TestUpdateSubConnState(t *testing.T) { // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + // newChildBalancer function as part of test setup. + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -589,13 +748,16 @@ func (s) TestCircuitBreaking(t *testing.T) { cancel() cdsB.Close() }() - // Here we invoke the watch callback registered on the fake xdsClient. This // will trigger the watch handler on the CDS balancer, which will update // the service's counter with the new max requests. 
var maxRequests uint32 = 1 - cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName, MaxRequests: &maxRequests} - wantCCS := edsCCS(serviceName, &maxRequests, false) + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: clusterName, + MaxRequests: &maxRequests, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(clusterName, &maxRequests, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -604,7 +766,7 @@ func (s) TestCircuitBreaking(t *testing.T) { // Since the counter's max requests was set to 1, the first request should // succeed and the second should fail. - counter := client.GetServiceRequestsCounter(serviceName) + counter := xdsclient.GetClusterRequestsCounter(clusterName, "") if err := counter.StartRequest(maxRequests); err != nil { t.Fatal(err) } @@ -614,21 +776,25 @@ func (s) TestCircuitBreaking(t *testing.T) { counter.EndRequest() } -// TestClose verifies the Close() method in the the CDS balancer. +// TestClose verifies the Close() method in the CDS balancer. func (s) TestClose(t *testing.T) { + grpctest.TLogger.ExpectError("cds-lb.*Received balancer config after close") + // This creates a CDS balancer, pushes a ClientConnState update with a fake // xdsClient, and makes sure that the CDS balancer registers a watch on the // provided xdsClient. xdsC, cdsB, edsB, _, cancel := setupWithWatch(t) defer cancel() - // Here we invoke the watch callback registered on the fake xdsClient. This // will trigger the watch handler on the CDS balancer, which will attempt to // create a new EDS balancer. The fake EDS balancer created above will be // returned to the CDS balancer, because we have overridden the - // newEDSBalancer function as part of test setup. 
- cdsUpdate := xdsclient.ClusterUpdate{ServiceName: serviceName} - wantCCS := edsCCS(serviceName, nil, false) + // newChildBalancer function as part of test setup. + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer ctxCancel() if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { @@ -640,7 +806,7 @@ func (s) TestClose(t *testing.T) { // Make sure that the cluster watch registered by the CDS balancer is // cancelled. - if err := xdsC.WaitForCancelClusterWatch(ctx); err != nil { + if _, err := xdsC.WaitForCancelClusterWatch(ctx); err != nil { t.Fatal(err) } @@ -659,13 +825,13 @@ func (s) TestClose(t *testing.T) { // Make sure that the UpdateClientConnState() method on the CDS balancer // returns error. - if err := cdsB.UpdateClientConnState(cdsCCS(clusterName)); err != errBalancerClosed { + if err := cdsB.UpdateClientConnState(cdsCCS(clusterName, xdsC)); err != errBalancerClosed { t.Fatalf("UpdateClientConnState() after close returned %v, want %v", err, errBalancerClosed) } // Make sure that the UpdateSubConnState() method on the CDS balancer does // not forward the update to the EDS balancer. 
- cdsB.UpdateSubConnState(&xdstestutils.TestSubConn{}, balancer.SubConnState{}) + cdsB.UpdateSubConnState(&testutils.TestSubConn{}, balancer.SubConnState{}) sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if err := edsB.waitForSubConnUpdate(sCtx, subConnWithState{}); err != context.DeadlineExceeded { @@ -683,6 +849,38 @@ func (s) TestClose(t *testing.T) { } } +func (s) TestExitIdle(t *testing.T) { + // This creates a CDS balancer, pushes a ClientConnState update with a fake + // xdsClient, and makes sure that the CDS balancer registers a watch on the + // provided xdsClient. + xdsC, cdsB, edsB, _, cancel := setupWithWatch(t) + defer func() { + cancel() + cdsB.Close() + }() + + // Here we invoke the watch callback registered on the fake xdsClient. This + // will trigger the watch handler on the CDS balancer, which will attempt to + // create a new EDS balancer. The fake EDS balancer created above will be + // returned to the CDS balancer, because we have overridden the + // newChildBalancer function as part of test setup. + cdsUpdate := xdsresource.ClusterUpdate{ + ClusterName: serviceName, + LBPolicy: wrrLocalityLBConfigJSON, + } + wantCCS := edsCCS(serviceName, nil, false, wrrLocalityLBConfigJSON, noopODLBCfgJSON) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + if err := invokeWatchCbAndWait(ctx, xdsC, cdsWatchInfo{cdsUpdate, nil}, wantCCS, edsB); err != nil { + t.Fatal(err) + } + + // Call ExitIdle on the CDS balancer. + cdsB.ExitIdle() + + edsB.exitIdleCh.Receive(ctx) +} + // TestParseConfig verifies the ParseConfig() method in the CDS balancer. 
func (s) TestParseConfig(t *testing.T) { bb := balancer.Get(cdsName) diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler.go b/xds/internal/balancer/cdsbalancer/cluster_handler.go new file mode 100644 index 000000000000..aa2d9674a790 --- /dev/null +++ b/xds/internal/balancer/cdsbalancer/cluster_handler.go @@ -0,0 +1,368 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cdsbalancer + +import ( + "encoding/json" + "errors" + "sync" + + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const maxDepth = 16 + +var ( + errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") + errExceedsMaxDepth = errors.New("aggregate cluster graph exceeds max depth") +) + +// clusterHandlerUpdate wraps the information received from the registered CDS +// watcher. A non-nil error is propagated to the underlying cluster_resolver +// balancer. A valid update results in creating a new cluster_resolver balancer +// (if one doesn't already exist) and pushing the update to it. +type clusterHandlerUpdate struct { + // securityCfg is the Security Config from the top (root) cluster. + securityCfg *xdsresource.SecurityConfig + + // lbPolicy is the the child of the cluster_impl policy, for all priorities. + lbPolicy json.RawMessage + + // updates is a list of ClusterUpdates from all the leaf clusters. 
+ updates []xdsresource.ClusterUpdate + err error +} + +// clusterHandler will be given a name representing a cluster. It will then +// update the CDS policy constantly with a list of Clusters to pass down to +// XdsClusterResolverLoadBalancingPolicyConfig in a stream like fashion. +type clusterHandler struct { + parent *cdsBalancer + + // A mutex to protect entire tree of clusters. + clusterMutex sync.Mutex + rootClusterName string + + createdClusters map[string]*clusterNode + + // A way to ping CDS Balancer about any updates or errors to a Node in the + // tree. This will either get called from this handler constructing an + // update or from a child with an error. Capacity of one as the only update + // CDS Balancer cares about is the most recent update. + updateChannel chan clusterHandlerUpdate +} + +func newClusterHandler(parent *cdsBalancer) *clusterHandler { + return &clusterHandler{ + parent: parent, + updateChannel: make(chan clusterHandlerUpdate, 1), + createdClusters: make(map[string]*clusterNode), + } +} + +func (ch *clusterHandler) updateRootCluster(rootClusterName string) { + ch.clusterMutex.Lock() + defer ch.clusterMutex.Unlock() + if ch.createdClusters[ch.rootClusterName] == nil { + // Construct a root node on first update. + createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) + ch.rootClusterName = rootClusterName + return + } + // Check if root cluster was changed. If it was, delete old one and start + // new one, if not do nothing. + if rootClusterName != ch.rootClusterName { + ch.createdClusters[ch.rootClusterName].delete() + createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) + ch.rootClusterName = rootClusterName + } +} + +// This function tries to construct a cluster update to send to CDS. +func (ch *clusterHandler) constructClusterUpdate() { + if ch.createdClusters[ch.rootClusterName] == nil { + // If root is nil, this handler is closed, ignore the update. 
+ return + } + clusterUpdate, err := ch.createdClusters[ch.rootClusterName].constructClusterUpdate(make(map[string]bool)) + if err != nil { + // If there was an error received no op, as this can mean one of the + // children hasn't received an update yet, or the graph continued to + // stay in an error state. If the graph continues to stay in an error + // state, no new error needs to be written to the update buffer as that + // would be redundant information. + return + } + if clusterUpdate == nil { + // This means that there was an aggregated cluster with no EDS or DNS as + // leaf nodes. No update to be written. + return + } + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. + select { + case <-ch.updateChannel: + default: + } + + ch.updateChannel <- clusterHandlerUpdate{ + securityCfg: ch.createdClusters[ch.rootClusterName].clusterUpdate.SecurityCfg, + lbPolicy: ch.createdClusters[ch.rootClusterName].clusterUpdate.LBPolicy, + updates: clusterUpdate, + } +} + +// close() is meant to be called by CDS when the CDS balancer is closed, and it +// cancels the watches for every cluster in the cluster tree. +func (ch *clusterHandler) close() { + ch.clusterMutex.Lock() + defer ch.clusterMutex.Unlock() + if ch.createdClusters[ch.rootClusterName] == nil { + return + } + ch.createdClusters[ch.rootClusterName].delete() + ch.rootClusterName = "" +} + +// This logically represents a cluster. This handles all the logic for starting +// and stopping a cluster watch, handling any updates, and constructing a list +// recursively for the ClusterHandler. +type clusterNode struct { + // A way to cancel the watch for the cluster. + cancelFunc func() + + // A list of children, as the Node can be an aggregate Cluster. 
+ children []string + + // A ClusterUpdate in order to build a list of cluster updates for CDS to + // send down to child XdsClusterResolverLoadBalancingPolicy. + clusterUpdate xdsresource.ClusterUpdate + + // This boolean determines whether this Node has received an update or not. + // This isn't the best practice, but this will protect a list of Cluster + // Updates from being constructed if a cluster in the tree has not received + // an update yet. + receivedUpdate bool + + clusterHandler *clusterHandler + + depth int32 + refCount int32 + + // maxDepthErr is set if this cluster node is an aggregate cluster and has a + // child that causes the graph to exceed the maximum depth allowed. This is + // used to show a cluster graph as being in an error state when it constructs + // a cluster update. + maxDepthErr error +} + +// CreateClusterNode creates a cluster node from a given clusterName. This will +// also start the watch for that cluster. +func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler, depth int32) { + // If the cluster has already been created, simply return, which ignores + // duplicates. + if topLevelHandler.createdClusters[clusterName] != nil { + topLevelHandler.createdClusters[clusterName].refCount++ + return + } + c := &clusterNode{ + clusterHandler: topLevelHandler, + depth: depth, + refCount: 1, + } + // Communicate with the xds client here. + topLevelHandler.parent.logger.Infof("CDS watch started on %v", clusterName) + cancel := xdsClient.WatchCluster(clusterName, c.handleResp) + c.cancelFunc = func() { + topLevelHandler.parent.logger.Infof("CDS watch canceled on %v", clusterName) + cancel() + } + topLevelHandler.createdClusters[clusterName] = c +} + +// This function cancels the cluster watch on the cluster and all of it's +// children. 
+func (c *clusterNode) delete() { + c.refCount-- + if c.refCount == 0 { + c.cancelFunc() + delete(c.clusterHandler.createdClusters, c.clusterUpdate.ClusterName) + for _, child := range c.children { + if c.clusterHandler.createdClusters[child] != nil { + c.clusterHandler.createdClusters[child].delete() + } + } + } +} + +// Construct cluster update (potentially a list of ClusterUpdates) for a node. +func (c *clusterNode) constructClusterUpdate(clustersSeen map[string]bool) ([]xdsresource.ClusterUpdate, error) { + // If the cluster has not yet received an update, the cluster update is not + // yet ready. + if !c.receivedUpdate { + return nil, errNotReceivedUpdate + } + if c.maxDepthErr != nil { + return nil, c.maxDepthErr + } + // Ignore duplicates. It's ok to ignore duplicates because the second + // occurrence of a cluster will never be used. I.e. in [C, D, C], the second + // C will never be used (the only way to fall back to lower priority D is if + // C is down, which means second C will never be chosen). Thus, [C, D, C] is + // logically equivalent to [C, D]. + if clustersSeen[c.clusterUpdate.ClusterName] { + return []xdsresource.ClusterUpdate{}, nil + } + clustersSeen[c.clusterUpdate.ClusterName] = true + + // Base case - LogicalDNS or EDS. Both of these cluster types will be tied + // to a single ClusterUpdate. + if c.clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { + return []xdsresource.ClusterUpdate{c.clusterUpdate}, nil + } + + // If an aggregate construct a list by recursively calling down to all of + // it's children. + var childrenUpdates []xdsresource.ClusterUpdate + for _, child := range c.children { + childUpdateList, err := c.clusterHandler.createdClusters[child].constructClusterUpdate(clustersSeen) + if err != nil { + return nil, err + } + childrenUpdates = append(childrenUpdates, childUpdateList...) + } + return childrenUpdates, nil +} + +// handleResp handles a xds response for a particular cluster. 
This function +// also handles any logic with regards to any child state that may have changed. +// At the end of the handleResp(), the clusterUpdate will be pinged in certain +// situations to try and construct an update to send back to CDS. +func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err error) { + c.clusterHandler.clusterMutex.Lock() + defer c.clusterHandler.clusterMutex.Unlock() + if err != nil { // Write this error for run() to pick up in CDS LB policy. + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. + select { + case <-c.clusterHandler.updateChannel: + default: + } + c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: err} + c.receivedUpdate = false + c.maxDepthErr = nil + return + } + + c.receivedUpdate = true + c.clusterUpdate = clusterUpdate + + // If the cluster was a leaf node, if the cluster update received had change + // in the cluster update then the overall cluster update would change and + // there is a possibility for the overall update to build so ping cluster + // handler to return. Also, if there was any children from previously, + // delete the children, as the cluster type is no longer an aggregate + // cluster. + if clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { + for _, child := range c.children { + c.clusterHandler.createdClusters[child].delete() + } + c.children = nil + c.maxDepthErr = nil + // This is an update in the one leaf node, should try to send an update + // to the parent CDS balancer. + // + // Note that this update might be a duplicate from the previous one. + // Because the update contains not only the cluster name to watch, but + // also the extra fields (e.g. security config). There's no good way to + // compare all the fields. + c.clusterHandler.constructClusterUpdate() + return + } + + // Aggregate cluster handling. 
+ if len(clusterUpdate.PrioritizedClusterNames) >= 1 { + if c.depth == maxDepth-1 { + // For a ClusterUpdate, the only update CDS cares about is the most + // recent one, so opportunistically drain the update channel before + // sending the new update. + select { + case <-c.clusterHandler.updateChannel: + default: + } + c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: errExceedsMaxDepth} + c.children = []string{} + c.maxDepthErr = errExceedsMaxDepth + return + } + } + + newChildren := make(map[string]bool) + for _, childName := range clusterUpdate.PrioritizedClusterNames { + newChildren[childName] = true + } + + // These booleans help determine whether this callback will ping the overall + // clusterHandler to try and construct an update to send back to CDS. This + // will be determined by whether there would be a change in the overall + // clusterUpdate for the whole tree (ex. change in clusterUpdate for current + // cluster or a deleted child) and also if there's even a possibility for + // the update to build (ex. if a child is created and a watch is started, + // that child hasn't received an update yet due to the mutex lock on this + // callback). + var createdChild bool + + // This map will represent the current children of the cluster. It will be + // first added to in order to represent the new children. It will then have + // any children deleted that are no longer present. + mapCurrentChildren := make(map[string]bool) + for _, child := range c.children { + mapCurrentChildren[child] = true + } + + // Add and construct any new child nodes. + for child := range newChildren { + if _, inChildrenAlready := mapCurrentChildren[child]; !inChildrenAlready { + createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler, c.depth+1) + } + } + + // Delete any child nodes no longer in the aggregate cluster's children. 
+ for child := range mapCurrentChildren { + if _, stillAChild := newChildren[child]; !stillAChild { + c.clusterHandler.createdClusters[child].delete() + delete(mapCurrentChildren, child) + } + } + + c.children = clusterUpdate.PrioritizedClusterNames + + c.maxDepthErr = nil + // If the cluster is an aggregate cluster, if this callback created any new + // child cluster nodes, then there's no possibility for a full cluster + // update to successfully build, as those created children will not have + // received an update yet. Even if this update did not delete a child, there + // is still a possibility for the cluster update to build, as the aggregate + // cluster can ignore duplicated children and thus the update can fill out + // the full cluster update tree. + if !createdChild { + c.clusterHandler.constructClusterUpdate() + } +} diff --git a/xds/internal/balancer/cdsbalancer/cluster_handler_test.go b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go new file mode 100644 index 000000000000..ee989ec3ef73 --- /dev/null +++ b/xds/internal/balancer/cdsbalancer/cluster_handler_test.go @@ -0,0 +1,1095 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cdsbalancer + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const ( + edsService = "EDS Service" + logicalDNSService = "Logical DNS Service" + edsService2 = "EDS Service 2" + logicalDNSService2 = "Logical DNS Service 2" + aggregateClusterService = "Aggregate Cluster Service" +) + +// setupTests creates a clusterHandler with a fake xds client for control over +// xds client. +func setupTests() (*clusterHandler, *fakeclient.Client) { + xdsC := fakeclient.NewClient() + ch := newClusterHandler(&cdsBalancer{xdsClient: xdsC}) + return ch, xdsC +} + +// Simplest case: the cluster handler receives a cluster name, handler starts a +// watch for that cluster, xds client returns that it is a Leaf Node (EDS or +// LogicalDNS), not a tree, so expectation that update is written to buffer +// which will be read by CDS LB. +func (s) TestSuccessCaseLeafNode(t *testing.T) { + tests := []struct { + name string + clusterName string + clusterUpdate xdsresource.ClusterUpdate + }{ + { + name: "test-update-root-cluster-EDS-success", + clusterName: edsService, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: edsService, + }, + }, + { + name: "test-update-root-cluster-Logical-DNS-success", + clusterName: logicalDNSService, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, + ClusterName: logicalDNSService, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ch, fakeClient := setupTests() + // When you first update the root cluster, it should hit the code + // path which will start a cluster node for that root. Updating the + // root cluster logically represents a ping from a ClientConn. 
+			ch.updateRootCluster(test.clusterName)
+			// Starting a cluster node involves communicating with the
+			// xdsClient, telling it to watch a cluster.
+			ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+			defer ctxCancel()
+			gotCluster, err := fakeClient.WaitForWatchCluster(ctx)
+			if err != nil {
+				t.Fatalf("xdsClient.WatchCDS failed with error: %v", err)
+			}
+			if gotCluster != test.clusterName {
+				t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, test.clusterName)
+			}
+			// Invoke callback with xds client with a certain clusterUpdate. Due
+			// to this cluster update filling out the whole cluster tree, as the
+			// cluster is of a root type (EDS or Logical DNS) and not an
+			// aggregate cluster, this should trigger the ClusterHandler to
+			// write to the update buffer to update the CDS policy.
+			fakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil)
+			select {
+			case chu := <-ch.updateChannel:
+				if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{test.clusterUpdate}); diff != "" {
+					t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff)
+				}
+			case <-ctx.Done():
+				t.Fatal("Timed out waiting for update from update channel.")
+			}
+			// Close the clusterHandler. This is meant to be called when the CDS
+			// Balancer is closed, and the call should cancel the watch for this
+			// cluster.
+			ch.close()
+			clusterNameDeleted, err := fakeClient.WaitForCancelClusterWatch(ctx)
+			if err != nil {
+				t.Fatalf("xdsClient.CancelCDS failed with error: %v", err)
+			}
+			if clusterNameDeleted != test.clusterName {
+				t.Fatalf("xdsClient.CancelCDS called for cluster %v, want: %v", clusterNameDeleted, test.clusterName)
+			}
+		})
+	}
+}
+
+// The cluster handler receives a cluster name, handler starts a watch for that
+// cluster, xds client returns that it is a Leaf Node (EDS or LogicalDNS), not a
+// tree, so expectation that first update is written to buffer which will be
+// read by CDS LB.
Then, send a new cluster update that is different, with the +// expectation that it is also written to the update buffer to send back to CDS. +func (s) TestSuccessCaseLeafNodeThenNewUpdate(t *testing.T) { + tests := []struct { + name string + clusterName string + clusterUpdate xdsresource.ClusterUpdate + newClusterUpdate xdsresource.ClusterUpdate + }{ + {name: "test-update-root-cluster-then-new-update-EDS-success", + clusterName: edsService, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: edsService, + }, + newClusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: edsService2, + }, + }, + { + name: "test-update-root-cluster-then-new-update-Logical-DNS-success", + clusterName: logicalDNSService, + clusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, + ClusterName: logicalDNSService, + }, + newClusterUpdate: xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, + ClusterName: logicalDNSService2, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster(test.clusterName) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + fakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil) + select { + case <-ch.updateChannel: + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from updateChannel.") + } + + // Check that sending the same cluster update also induces an update + // to be written to update buffer. 
+ fakeClient.InvokeWatchClusterCallback(test.clusterUpdate, nil) + shouldNotHappenCtx, shouldNotHappenCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + select { + case <-ch.updateChannel: + case <-shouldNotHappenCtx.Done(): + t.Fatal("Timed out waiting for update from updateChannel.") + } + + // Above represents same thing as the simple + // TestSuccessCaseLeafNode, extra behavior + validation (clusterNode + // which is a leaf receives a changed clusterUpdate, which should + // ping clusterHandler, which should then write to the update + // buffer). + fakeClient.InvokeWatchClusterCallback(test.newClusterUpdate, nil) + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{test.newClusterUpdate}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from updateChannel.") + } + }) + } +} + +// TestUpdateRootClusterAggregateSuccess tests the case where an aggregate +// cluster is a root pointing to two child clusters one of type EDS and the +// other of type LogicalDNS. This test will then send cluster updates for both +// the children, and at the end there should be a successful clusterUpdate +// written to the update buffer to send back to CDS. 
+func (s) TestUpdateRootClusterAggregateSuccess(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster(aggregateClusterService) + + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + gotCluster, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != aggregateClusterService { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, aggregateClusterService) + } + + // The xdsClient telling the clusterNode that the cluster type is an + // aggregate cluster which will cause a lot of downstream behavior. For a + // cluster type that isn't an aggregate, the behavior is simple. The + // clusterNode will simply get a successful update, which will then ping the + // clusterHandler which will successfully build an update to send to the CDS + // policy. In the aggregate cluster case, the handleResp callback must also + // start watches for the aggregate cluster's children. The ping to the + // clusterHandler at the end of handleResp should be a no-op, as neither the + // EDS or LogicalDNS child clusters have received an update yet. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: aggregateClusterService, + PrioritizedClusterNames: []string{edsService, logicalDNSService}, + }, nil) + + // xds client should be called to start a watch for one of the child + // clusters of the aggregate. The order of the children in the update + // written to the buffer to send to CDS matters, however there is no + // guarantee on the order it will start the watches of the children. 
+ gotCluster, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != edsService { + if gotCluster != logicalDNSService { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, edsService) + } + } + + // xds client should then be called to start a watch for the second child + // cluster. + gotCluster, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != edsService { + if gotCluster != logicalDNSService { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, logicalDNSService) + } + } + + // The handleResp() call on the root aggregate cluster should not ping the + // cluster handler to try and construct an update, as the handleResp() + // callback knows that when a child is created, it cannot possibly build a + // successful update yet. Thus, there should be nothing in the update + // channel. + + shouldNotHappenCtx, shouldNotHappenCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + + select { + case <-ch.updateChannel: + t.Fatal("Cluster Handler wrote an update to updateChannel when it shouldn't have, as each node in the full cluster tree has not yet received an update") + case <-shouldNotHappenCtx.Done(): + } + + // Send callback for the EDS child cluster. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: edsService, + }, nil) + + // EDS child cluster will ping the Cluster Handler, to try an update, which + // still won't successfully build as the LogicalDNS child of the root + // aggregate cluster has not yet received and handled an update. 
+ select { + case <-ch.updateChannel: + t.Fatal("Cluster Handler wrote an update to updateChannel when it shouldn't have, as each node in the full cluster tree has not yet received an update") + case <-shouldNotHappenCtx.Done(): + } + + // Invoke callback for Logical DNS child cluster. + + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, + ClusterName: logicalDNSService, + }, nil) + + // Will Ping Cluster Handler, which will finally successfully build an + // update as all nodes in the tree of clusters have received an update. + // Since this cluster is an aggregate cluster comprised of two children, the + // returned update should be length 2, as the xds cluster resolver LB policy + // only cares about the full list of LogicalDNS and EDS clusters + // representing the base nodes of the tree of clusters. This list should be + // ordered as per the cluster update. + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: edsService, + }, { + ClusterType: xdsresource.ClusterTypeLogicalDNS, + ClusterName: logicalDNSService, + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } +} + +// TestUpdateRootClusterAggregateThenChangeChild tests the scenario where you +// have an aggregate cluster with an EDS child and a LogicalDNS child, then you +// change one of the children and send an update for the changed child. This +// should write a new update to the update buffer to send back to CDS. +func (s) TestUpdateRootClusterAggregateThenChangeChild(t *testing.T) { + // This initial code is the same as the test for the aggregate success case, + // except without validations. 
This will get this test to the point where it
+	// can change one of the children.
+	ch, fakeClient := setupTests()
+	ch.updateRootCluster(aggregateClusterService)
+
+	ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer ctxCancel()
+	_, err := fakeClient.WaitForWatchCluster(ctx)
+	if err != nil {
+		t.Fatalf("xdsClient.WatchCDS failed with error: %v", err)
+	}
+
+	fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{
+		ClusterType:             xdsresource.ClusterTypeAggregate,
+		ClusterName:             aggregateClusterService,
+		PrioritizedClusterNames: []string{edsService, logicalDNSService},
+	}, nil)
+	fakeClient.WaitForWatchCluster(ctx)
+	fakeClient.WaitForWatchCluster(ctx)
+	fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{
+		ClusterType: xdsresource.ClusterTypeEDS,
+		ClusterName: edsService,
+	}, nil)
+	fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{
+		ClusterType: xdsresource.ClusterTypeLogicalDNS,
+		ClusterName: logicalDNSService,
+	}, nil)
+
+	select {
+	case <-ch.updateChannel:
+	case <-ctx.Done():
+		t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.")
+	}
+
+	fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{
+		ClusterType:             xdsresource.ClusterTypeAggregate,
+		ClusterName:             aggregateClusterService,
+		PrioritizedClusterNames: []string{edsService, logicalDNSService2},
+	}, nil)
+
+	// The cluster update lets the aggregate cluster know that its children
+	// are now edsService and logicalDNSService2, which implies that the
+	// aggregateCluster lost its old logicalDNSService child. Thus, the
+	// logicalDNSService child should be deleted.
+ clusterNameDeleted, err := fakeClient.WaitForCancelClusterWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if clusterNameDeleted != logicalDNSService { + t.Fatalf("xdsClient.CancelCDS called for cluster %v, want: %v", clusterNameDeleted, logicalDNSService) + } + + // The handleResp() callback should then start a watch for + // logicalDNSService2. + clusterNameCreated, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if clusterNameCreated != logicalDNSService2 { + t.Fatalf("xdsClient.WatchCDS called for cluster %v, want: %v", clusterNameCreated, logicalDNSService2) + } + + // handleResp() should try and send an update here, but it will fail as + // logicalDNSService2 has not yet received an update. + shouldNotHappenCtx, shouldNotHappenCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + select { + case <-ch.updateChannel: + t.Fatal("Cluster Handler wrote an update to updateChannel when it shouldn't have, as each node in the full cluster tree has not yet received an update") + case <-shouldNotHappenCtx.Done(): + } + + // Invoke a callback for the new logicalDNSService2 - this will fill out the + // tree with successful updates. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, + ClusterName: logicalDNSService2, + }, nil) + + // Behavior: This update make every node in the tree of cluster have + // received an update. Thus, at the end of this callback, when you ping the + // clusterHandler to try and construct an update, the update should now + // successfully be written to update buffer to send back to CDS. This new + // update should contain the new child of LogicalDNS2. 
+ + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: edsService, + }, { + ClusterType: xdsresource.ClusterTypeLogicalDNS, + ClusterName: logicalDNSService2, + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } +} + +// TestUpdateRootClusterAggregateThenChangeRootToEDS tests the situation where +// you have a fully updated aggregate cluster (where AggregateCluster success +// test gets you) as the root cluster, then you update that root cluster to a +// cluster of type EDS. +func (s) TestUpdateRootClusterAggregateThenChangeRootToEDS(t *testing.T) { + // This initial code is the same as the test for the aggregate success case, + // except without validations. This will get this test to the point where it + // can update the root cluster to one of type EDS. 
+ ch, fakeClient := setupTests() + ch.updateRootCluster(aggregateClusterService) + + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: aggregateClusterService, + PrioritizedClusterNames: []string{edsService, logicalDNSService}, + }, nil) + fakeClient.WaitForWatchCluster(ctx) + fakeClient.WaitForWatchCluster(ctx) + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: edsService, + }, nil) + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeLogicalDNS, + ClusterName: logicalDNSService, + }, nil) + + select { + case <-ch.updateChannel: + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } + + // Changes the root aggregate cluster to a EDS cluster. This should delete + // the root aggregate cluster and all of it's children by successfully + // canceling the watches for them. + ch.updateRootCluster(edsService2) + + // Reads from the cancel channel, should first be type Aggregate, then EDS + // then Logical DNS. 
+	clusterNameDeleted, err := fakeClient.WaitForCancelClusterWatch(ctx)
+	if err != nil {
+		t.Fatalf("xdsClient.CancelCDS failed with error: %v", err)
+	}
+	if clusterNameDeleted != aggregateClusterService {
+		t.Fatalf("xdsClient.CancelCDS called for cluster %v, want: %v", clusterNameDeleted, aggregateClusterService)
+	}
+
+	clusterNameDeleted, err = fakeClient.WaitForCancelClusterWatch(ctx)
+	if err != nil {
+		t.Fatalf("xdsClient.CancelCDS failed with error: %v", err)
+	}
+	if clusterNameDeleted != edsService {
+		t.Fatalf("xdsClient.CancelCDS called for cluster %v, want: %v", clusterNameDeleted, edsService)
+	}
+
+	clusterNameDeleted, err = fakeClient.WaitForCancelClusterWatch(ctx)
+	if err != nil {
+		t.Fatalf("xdsClient.CancelCDS failed with error: %v", err)
+	}
+	if clusterNameDeleted != logicalDNSService {
+		t.Fatalf("xdsClient.CancelCDS called for cluster %v, want: %v", clusterNameDeleted, logicalDNSService)
+	}
+
+	// After deletion, it should start a watch for the EDS Cluster. The behavior
+	// for this EDS Cluster receiving an update from xds client and then
+	// successfully writing an update to send back to CDS is already tested in
+	// the updateEDS success case.
+	gotCluster, err := fakeClient.WaitForWatchCluster(ctx)
+	if err != nil {
+		t.Fatalf("xdsClient.WatchCDS failed with error: %v", err)
+	}
+	if gotCluster != edsService2 {
+		t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, edsService2)
+	}
+}
+
+// TestHandleRespInvokedWithError tests that when handleResp is invoked with an
+// error, that the error is successfully written to the update buffer.
+func (s) TestHandleRespInvokedWithError(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster(edsService) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{}, errors.New("some error")) + select { + case chu := <-ch.updateChannel: + if chu.err.Error() != "some error" { + t.Fatalf("Did not receive the expected error, instead received: %v", chu.err.Error()) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } +} + +// TestSwitchClusterNodeBetweenLeafAndAggregated tests having an existing +// cluster node switch between a leaf and an aggregated cluster. When the +// cluster switches from a leaf to an aggregated cluster, it should add +// children, and when it switches back to a leaf, it should delete those new +// children and also successfully write a cluster update to the update buffer. +func (s) TestSwitchClusterNodeBetweenLeafAndAggregated(t *testing.T) { + // Getting the test to the point where there's a root cluster which is a eds + // leaf. + ch, fakeClient := setupTests() + ch.updateRootCluster(edsService2) + ctx, ctxCancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer ctxCancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: edsService2, + }, nil) + select { + case <-ch.updateChannel: + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + // Switch the cluster to an aggregate cluster, this should cause two new + // child watches to be created. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: edsService2, + PrioritizedClusterNames: []string{edsService, logicalDNSService}, + }, nil) + + // xds client should be called to start a watch for one of the child + // clusters of the aggregate. The order of the children in the update + // written to the buffer to send to CDS matters, however there is no + // guarantee on the order it will start the watches of the children. + gotCluster, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != edsService { + if gotCluster != logicalDNSService { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, edsService) + } + } + + // xds client should then be called to start a watch for the second child + // cluster. + gotCluster, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != edsService { + if gotCluster != logicalDNSService { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, logicalDNSService) + } + } + + // After starting a watch for the second child cluster, there should be no + // more watches started on the xds client. + shouldNotHappenCtx, shouldNotHappenCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + gotCluster, err = fakeClient.WaitForWatchCluster(shouldNotHappenCtx) + if err == nil { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, no more watches should be started.", gotCluster) + } + + // The handleResp() call on the root aggregate cluster should not ping the + // cluster handler to try and construct an update, as the handleResp() + // callback knows that when a child is created, it cannot possibly build a + // successful update yet. 
Thus, there should be nothing in the update + // channel. + + shouldNotHappenCtx, shouldNotHappenCtxCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + + select { + case <-ch.updateChannel: + t.Fatal("Cluster Handler wrote an update to updateChannel when it shouldn't have, as each node in the full cluster tree has not yet received an update") + case <-shouldNotHappenCtx.Done(): + } + + // Switch the cluster back to an EDS Cluster. This should cause the two + // children to be deleted. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: edsService2, + }, nil) + + // Should delete the two children (no guarantee of ordering deleted, which + // is ok), then successfully write an update to the update buffer as the + // full cluster tree has received updates. + clusterNameDeleted, err := fakeClient.WaitForCancelClusterWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + // No guarantee of ordering, so one of the children should be deleted first. + if clusterNameDeleted != edsService { + if clusterNameDeleted != logicalDNSService { + t.Fatalf("xdsClient.CancelCDS called for cluster %v, want either: %v or: %v", clusterNameDeleted, edsService, logicalDNSService) + } + } + // Then the other child should be deleted. + clusterNameDeleted, err = fakeClient.WaitForCancelClusterWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelCDS failed with error: %v", err) + } + if clusterNameDeleted != edsService { + if clusterNameDeleted != logicalDNSService { + t.Fatalf("xdsClient.CancelCDS called for cluster %v, want either: %v or: %v", clusterNameDeleted, edsService, logicalDNSService) + } + } + + // After cancelling a watch for the second child cluster, there should be no + // more watches cancelled on the xds client. 
+ shouldNotHappenCtx, shouldNotHappenCtxCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer shouldNotHappenCtxCancel() + gotCluster, err = fakeClient.WaitForCancelClusterWatch(shouldNotHappenCtx) + if err == nil { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, no more watches should be cancelled.", gotCluster) + } + + // Then an update should successfully be written to the update buffer. + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: edsService2, + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } +} + +// TestExceedsMaxStackDepth tests the scenario where an aggregate cluster +// exceeds the maximum depth, which is 16. This should cause an error to be +// written to the update buffer. +func (s) TestExceedsMaxStackDepth(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster("cluster0") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + + for i := 0; i <= 15; i++ { + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "cluster" + fmt.Sprint(i), + PrioritizedClusterNames: []string{"cluster" + fmt.Sprint(i+1)}, + }, nil) + if i == 15 { + // The 16th iteration will try and create a cluster which exceeds + // max stack depth and will thus error, so no CDS Watch will be + // started for the child. 
+ continue + } + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + } + select { + case chu := <-ch.updateChannel: + if chu.err.Error() != "aggregate cluster graph exceeds max depth" { + t.Fatalf("Did not receive the expected error, instead received: %v", chu.err.Error()) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for an error to be written to update channel.") + } +} + +// TestDiamondDependency tests a diamond shaped aggregate cluster (A->[B,C]; +// B->D; C->D). Due to both B and C pointing to D as it's child, it should be +// ignored for C. Once all 4 clusters have received a CDS update, an update +// should be then written to the update buffer, specifying a single Cluster D. +func (s) TestDiamondDependency(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster("clusterA") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterB", "clusterC"}, + }, nil) + // Two watches should be started for both child clusters. + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + // B -> D. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterB", + PrioritizedClusterNames: []string{"clusterD"}, + }, nil) + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + + // This shouldn't cause an update to be written to the update buffer, + // as cluster C has not received a cluster update yet. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterD", + }, nil) + + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("an update should not have been written to the update buffer") + case <-sCtx.Done(): + } + + // This update for C should cause an update to be written to the update + // buffer. When you search this aggregated cluster graph, each node has + // received an update. This update should only contain one clusterD, as + // clusterC does not add a clusterD child update due to the clusterD update + // already having been added as a child of clusterB. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterC", + PrioritizedClusterNames: []string{"clusterD"}, + }, nil) + + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterD", + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } +} + +// TestIgnoreDups tests the cluster (A->[B, C]; B->[C, D]). Only one watch +// should be started for cluster C. 
The update written to the update buffer +// should only contain one instance of cluster C correctly as a higher priority +// than D. +func (s) TestIgnoreDups(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster("clusterA") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterB", "clusterC"}, + }, nil) + // Two watches should be started, one for each child cluster. + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + // The child cluster C should not have a watch started for it, as it is + // already part of the aggregate cluster graph as the child of the root + // cluster clusterA and has already had a watch started for it. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterB", + PrioritizedClusterNames: []string{"clusterC", "clusterD"}, + }, nil) + // Only one watch should be started, which should be for clusterD. 
+ name, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if name != "clusterD" { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: clusterD", name) + } + + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err = fakeClient.WaitForWatchCluster(sCtx); err == nil { + t.Fatalf("only one watch should have been started for the children of clusterB") + } + + // This update should not cause an update to be written to the update + // buffer, as each cluster in the tree has not yet received a cluster + // update. With cluster B ignoring cluster C, the system should function as + // if cluster C was not a child of cluster B (meaning all 4 clusters should + // be required to get an update). + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterC", + }, nil) + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("an update should not have been written to the update buffer") + case <-sCtx.Done(): + } + + // This update causes all 4 clusters in the aggregated cluster graph to have + // received an update, so an update should be written to the update buffer + // with only a single occurrence of cluster C as a higher priority than + // cluster D. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterD", + }, nil) + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterC", + }, { + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterD", + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } + + // Delete A's ref to C by updating A with only child B. Since B still has a + // reference to C, C's watch should not be canceled, and also an update + // should correctly be built. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterB"}, + }, nil) + + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterC", + }, { + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "clusterD", + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } +} + +// TestErrorStateWholeTree tests the scenario where the aggregate cluster graph +// exceeds max depth. An error should be written to the update channel. +// Afterward, if a valid response comes in for another cluster, no update should +// be written to the update channel, as the aggregate cluster graph is still in +// the same error state. 
+func (s) TestErrorStateWholeTree(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster("cluster0") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + + for i := 0; i <= 15; i++ { + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "cluster" + fmt.Sprint(i), + PrioritizedClusterNames: []string{"cluster" + fmt.Sprint(i+1)}, + }, nil) + if i == 15 { + // The 16th iteration will try and create a cluster which exceeds + // max stack depth and will thus error, so no CDS Watch will be + // started for the child. + continue + } + _, err = fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + } + select { + case chu := <-ch.updateChannel: + if chu.err.Error() != "aggregate cluster graph exceeds max depth" { + t.Fatalf("Did not receive the expected error, instead received: %v", chu.err.Error()) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for an error to be written to update channel.") + } + + // Invoke a cluster callback for a node in the graph that rests within the + // allowed depth. This will cause the system to try and construct a cluster + // update, and it shouldn't write an update as the aggregate cluster graph + // is still in an error state. Since the graph continues to stay in an error + // state, no new error needs to be written to the update buffer as that + // would be redundant information. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "cluster3", + PrioritizedClusterNames: []string{"cluster4"}, + }, nil) + + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("an update should not have been written to the update buffer") + case <-sCtx.Done(): + } + + // Invoke the same cluster update for cluster 15, specifying it has a child + // cluster16. This should cause an error to be written to the update buffer, + // as it still exceeds the max depth. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "cluster15", + PrioritizedClusterNames: []string{"cluster16"}, + }, nil) + select { + case chu := <-ch.updateChannel: + if chu.err.Error() != "aggregate cluster graph exceeds max depth" { + t.Fatalf("Did not receive the expected error, instead received: %v", chu.err.Error()) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for an error to be written to update channel.") + } + + // When you remove the child of cluster15 that causes the graph to be in the + // error state of exceeding max depth, the update should successfully + // construct and be written to the update buffer. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "cluster15", + }, nil) + + select { + case chu := <-ch.updateChannel: + if diff := cmp.Diff(chu.updates, []xdsresource.ClusterUpdate{{ + ClusterType: xdsresource.ClusterTypeEDS, + ClusterName: "cluster15", + }}); diff != "" { + t.Fatalf("got unexpected cluster update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the cluster update to be written to the update buffer.") + } +} + +// TestNodeChildOfItself tests the scenario where the aggregate cluster graph +// has a node that has child node of itself. The case for this is A -> A, and +// since there is no base cluster (EDS or Logical DNS), no update should be +// written if it tries to build a cluster update. +func (s) TestNodeChildOfItself(t *testing.T) { + ch, fakeClient := setupTests() + ch.updateRootCluster("clusterA") + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + _, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + // Invoke the callback informing the cluster handler that clusterA has a + // child that it is itself. Due to this child cluster being a duplicate, no + // watch should be started. Since there are no leaf nodes (i.e. EDS or + // Logical DNS), no update should be written to the update buffer. 
+ fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterA"}, + }, nil) + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := fakeClient.WaitForWatchCluster(sCtx); err == nil { + t.Fatal("Watch should not have been started for clusterA") + } + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("update should not have been written to update buffer") + case <-sCtx.Done(): + } + + // Invoke the callback again informing the cluster handler that clusterA has + // a child that it is itself. Due to this child cluster being a duplicate, + // no watch should be started. Since there are no leaf nodes (i.e. EDS or + // Logical DNS), no update should be written to the update buffer. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterA"}, + }, nil) + + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := fakeClient.WaitForWatchCluster(sCtx); err == nil { + t.Fatal("Watch should not have been started for clusterA") + } + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("update should not have been written to update buffer, as clusterB has not received an update yet") + case <-sCtx.Done(): + } + + // Inform the cluster handler that clusterA now has clusterB as a child. 
+ // This should not cancel the watch for A, as it is still the root cluster + // and still has a ref count, not write an update to update buffer as + // cluster B has not received an update yet, and start a new watch for + // cluster B as it is not a duplicate. + fakeClient.InvokeWatchClusterCallback(xdsresource.ClusterUpdate{ + ClusterType: xdsresource.ClusterTypeAggregate, + ClusterName: "clusterA", + PrioritizedClusterNames: []string{"clusterB"}, + }, nil) + + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := fakeClient.WaitForCancelClusterWatch(sCtx); err == nil { + t.Fatal("clusterA should not have been canceled, as it is still the root cluster") + } + + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-ch.updateChannel: + t.Fatal("update should not have been written to update buffer, as clusterB has not received an update yet") + case <-sCtx.Done(): + } + + gotCluster, err := fakeClient.WaitForWatchCluster(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchCDS failed with error: %v", err) + } + if gotCluster != "clusterB" { + t.Fatalf("xdsClient.WatchCDS called for cluster: %v, want: %v", gotCluster, "clusterB") + } +} diff --git a/xds/internal/balancer/clusterimpl/balancer_test.go b/xds/internal/balancer/clusterimpl/balancer_test.go index 3e6ac0fd2900..9a36db4dc7d8 100644 --- a/xds/internal/balancer/clusterimpl/balancer_test.go +++ b/xds/internal/balancer/clusterimpl/balancer_test.go @@ -20,6 +20,8 @@ package clusterimpl import ( "context" + "errors" + "fmt" "strings" "testing" "time" @@ -27,26 +29,40 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + 
"google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/grpctest" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/client/load" - "google.golang.org/grpc/xds/internal/testutils" + xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) const ( - defaultTestTimeout = 1 * time.Second - testClusterName = "test-cluster" - testServiceName = "test-eds-service" - testLRSServerName = "test-lrs-name" + defaultTestTimeout = 5 * time.Second + defaultShortTestTimeout = 100 * time.Microsecond + + testClusterName = "test-cluster" + testServiceName = "test-eds-service" ) var ( testBackendAddrs = []resolver.Address{ {Addr: "1.1.1.1:1"}, } + testLRSServerConfig = &bootstrap.ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: bootstrap.ChannelCreds{ + Type: "google_default", + }, + } cmpOpts = cmp.Options{ cmpopts.EquateEmpty(), @@ -54,19 +70,28 @@ var ( } ) +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + func init() { - newRandomWRR = testutils.NewTestWRR + NewRandomWRR = testutils.NewTestWRR } // TestDropByCategory verifies that the balancer correctly drops the picks, and // that the drops are reported. 
-func TestDropByCategory(t *testing.T) { +func (s) TestDropByCategory(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() - builder := balancer.Get(clusterImplName) + builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) b := builder.Build(cc, balancer.BuildOptions{}) defer b.Close() @@ -77,14 +102,12 @@ func TestDropByCategory(t *testing.T) { dropDenominator = 2 ) if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, - BalancerConfig: &lbConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LRSLoadReportingServerName: newString(testLRSServerName), - DropCategories: []dropCategory{{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig, + DropCategories: []DropConfig{{ Category: dropReason, RequestsPerMillion: million * dropNumerator / dropDenominator, }}, @@ -96,47 +119,45 @@ func TestDropByCategory(t *testing.T) { t.Fatalf("unexpected error from UpdateClientConnState: %v", err) } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - got, err := xdsC.WaitForReportLoad(ctx) if err != nil { t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) } - if got.Server != testLRSServerName { - t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerName) + if got.Server != testLRSServerConfig { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerConfig) } sc1 := 
<-cc.NewSubConnCh b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) // This should get the connecting picker. - p0 := <-cc.NewPickerCh - for i := 0; i < 10; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != balancer.ErrNoSubConnAvailable { - t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with one backend. - p1 := <-cc.NewPickerCh + const rpcCount = 20 - for i := 0; i < rpcCount; i++ { - gotSCSt, err := p1.Pick(balancer.PickInfo{}) - // Even RPCs are dropped. - if i%2 == 0 { - if err == nil || !strings.Contains(err.Error(), "dropped") { - t.Fatalf("pick.Pick, got %v, %v, want error RPC dropped", gotSCSt, err) + if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { + for i := 0; i < rpcCount; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + // Even RPCs are dropped. + if i%2 == 0 { + if err == nil || !strings.Contains(err.Error(), "dropped") { + return fmt.Errorf("pick.Pick, got %v, %v, want error RPC dropped", gotSCSt, err) + } + continue + } + if err != nil || !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + return fmt.Errorf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + if gotSCSt.Done != nil { + gotSCSt.Done(balancer.DoneInfo{}) } - continue - } - if err != nil || !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) - } - if gotSCSt.Done != nil { - gotSCSt.Done(balancer.DoneInfo{}) } + return nil + }); err != nil { + t.Fatal(err.Error()) } // Dump load data from the store and compare with expected counts. 
@@ -150,6 +171,9 @@ func TestDropByCategory(t *testing.T) { Service: testServiceName, TotalDrops: dropCount, Drops: map[string]uint64{dropReason: dropCount}, + LocalityStats: map[string]load.LocalityData{ + assertString(xdsinternal.LocalityID{}.ToString): {RequestStats: load.RequestData{Succeeded: rpcCount - dropCount}}, + }, }} gotStatsData0 := loadStore.Stats([]string{testClusterName}) @@ -164,14 +188,12 @@ func TestDropByCategory(t *testing.T) { dropDenominator2 = 4 ) if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, - BalancerConfig: &lbConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LRSLoadReportingServerName: newString(testLRSServerName), - DropCategories: []dropCategory{{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig, + DropCategories: []DropConfig{{ Category: dropReason2, RequestsPerMillion: million * dropNumerator2 / dropDenominator2, }}, @@ -183,22 +205,26 @@ func TestDropByCategory(t *testing.T) { t.Fatalf("unexpected error from UpdateClientConnState: %v", err) } - p2 := <-cc.NewPickerCh - for i := 0; i < rpcCount; i++ { - gotSCSt, err := p2.Pick(balancer.PickInfo{}) - // Even RPCs are dropped. - if i%4 == 0 { - if err == nil || !strings.Contains(err.Error(), "dropped") { - t.Fatalf("pick.Pick, got %v, %v, want error RPC dropped", gotSCSt, err) + if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { + for i := 0; i < rpcCount; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + // Even RPCs are dropped. 
+ if i%4 == 0 { + if err == nil || !strings.Contains(err.Error(), "dropped") { + return fmt.Errorf("pick.Pick, got %v, %v, want error RPC dropped", gotSCSt, err) + } + continue + } + if err != nil || !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + return fmt.Errorf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + if gotSCSt.Done != nil { + gotSCSt.Done(balancer.DoneInfo{}) } - continue - } - if err != nil || !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) - } - if gotSCSt.Done != nil { - gotSCSt.Done(balancer.DoneInfo{}) } + return nil + }); err != nil { + t.Fatal(err.Error()) } const dropCount2 = rpcCount * dropNumerator2 / dropDenominator2 @@ -207,6 +233,9 @@ func TestDropByCategory(t *testing.T) { Service: testServiceName, TotalDrops: dropCount2, Drops: map[string]uint64{dropReason2: dropCount2}, + LocalityStats: map[string]load.LocalityData{ + assertString(xdsinternal.LocalityID{}.ToString): {RequestStats: load.RequestData{Succeeded: rpcCount - dropCount2}}, + }, }} gotStatsData1 := loadStore.Stats([]string{testClusterName}) @@ -217,27 +246,23 @@ func TestDropByCategory(t *testing.T) { // TestDropCircuitBreaking verifies that the balancer correctly drops the picks // due to circuit breaking, and that the drops are reported. 
-func TestDropCircuitBreaking(t *testing.T) { +func (s) TestDropCircuitBreaking(t *testing.T) { + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() - builder := balancer.Get(clusterImplName) + builder := balancer.Get(Name) cc := testutils.NewTestClientConn(t) b := builder.Build(cc, balancer.BuildOptions{}) defer b.Close() var maxRequest uint32 = 50 if err := b.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, - BalancerConfig: &lbConfig{ - Cluster: testClusterName, - EDSServiceName: testServiceName, - LRSLoadReportingServerName: newString(testLRSServerName), - MaxConcurrentRequests: &maxRequest, + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: &maxRequest, ChildPolicy: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, @@ -253,58 +278,59 @@ func TestDropCircuitBreaking(t *testing.T) { if err != nil { t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) } - if got.Server != testLRSServerName { - t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerName) + if got.Server != testLRSServerConfig { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerConfig) } sc1 := <-cc.NewSubConnCh b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) // This should get the connecting picker. 
- p0 := <-cc.NewPickerCh - for i := 0; i < 10; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != balancer.ErrNoSubConnAvailable { - t.Fatalf("picker.Pick, got _,%v, want Err=%v", err, balancer.ErrNoSubConnAvailable) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with one backend. - dones := []func(){} - p1 := <-cc.NewPickerCh const rpcCount = 100 - for i := 0; i < rpcCount; i++ { - gotSCSt, err := p1.Pick(balancer.PickInfo{}) - if i < 50 && err != nil { - t.Errorf("The first 50%% picks should be non-drops, got error %v", err) - } else if i > 50 && err == nil { - t.Errorf("The second 50%% picks should be drops, got error ") - } - dones = append(dones, func() { - if gotSCSt.Done != nil { - gotSCSt.Done(balancer.DoneInfo{}) + if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { + dones := []func(){} + for i := 0; i < rpcCount; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + if i < 50 && err != nil { + return fmt.Errorf("The first 50%% picks should be non-drops, got error %v", err) + } else if i > 50 && err == nil { + return fmt.Errorf("The second 50%% picks should be drops, got error ") } - }) - } - for _, done := range dones { - done() - } - - dones = []func(){} - // Pick without drops. - for i := 0; i < 50; i++ { - gotSCSt, err := p1.Pick(balancer.PickInfo{}) - if err != nil { - t.Errorf("The third 50%% picks should be non-drops, got error %v", err) + dones = append(dones, func() { + if gotSCSt.Done != nil { + gotSCSt.Done(balancer.DoneInfo{}) + } + }) } - dones = append(dones, func() { - if gotSCSt.Done != nil { - gotSCSt.Done(balancer.DoneInfo{}) + for _, done := range dones { + done() + } + + dones = []func(){} + // Pick without drops. 
+ for i := 0; i < 50; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + if err != nil { + t.Errorf("The third 50%% picks should be non-drops, got error %v", err) } - }) - } - for _, done := range dones { - done() + dones = append(dones, func() { + if gotSCSt.Done != nil { + gotSCSt.Done(balancer.DoneInfo{}) + } + }) + } + for _, done := range dones { + done() + } + + return nil + }); err != nil { + t.Fatal(err.Error()) } // Dump load data from the store and compare with expected counts. @@ -317,6 +343,9 @@ func TestDropCircuitBreaking(t *testing.T) { Cluster: testClusterName, Service: testServiceName, TotalDrops: uint64(maxRequest), + LocalityStats: map[string]load.LocalityData{ + assertString(xdsinternal.LocalityID{}.ToString): {RequestStats: load.RequestData{Succeeded: uint64(rpcCount - maxRequest + 50)}}, + }, }} gotStatsData0 := loadStore.Stats([]string{testClusterName}) @@ -324,3 +353,432 @@ func TestDropCircuitBreaking(t *testing.T) { t.Fatalf("got unexpected drop reports, diff (-got, +want): %v", diff) } } + +// TestPickerUpdateAfterClose covers the case where a child policy sends a +// picker update after the cluster_impl policy is closed. Because picker updates +// are handled in the run() goroutine, which exits before Close() returns, we +// expect the above picker update to be dropped. +func (s) TestPickerUpdateAfterClose(t *testing.T) { + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) + xdsC := fakeclient.NewClient() + + builder := balancer.Get(Name) + cc := testutils.NewTestClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + + // Create a stub balancer which waits for the cluster_impl policy to be + // closed before sending a picker update (upon receipt of a subConn state + // change). 
+ closeCh := make(chan struct{}) + const childPolicyName = "stubBalancer-TestPickerUpdateAfterClose" + stub.Register(childPolicyName, stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + // Create a subConn which will be used later on to test the race + // between UpdateSubConnState() and Close(). + bd.ClientConn.NewSubConn(ccs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, _ balancer.SubConn, _ balancer.SubConnState) { + go func() { + // Wait for Close() to be called on the parent policy before + // sending the picker update. + <-closeCh + bd.ClientConn.UpdateState(balancer.State{ + Picker: base.NewErrPicker(errors.New("dummy error picker")), + }) + }() + }, + }) + + var maxRequest uint32 = 50 + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + MaxConcurrentRequests: &maxRequest, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: childPolicyName, + }, + }, + }); err != nil { + b.Close() + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + // Send a subConn state change to trigger a picker update. The stub balancer + // that we use as the child policy will not send a picker update until the + // parent policy is closed. + sc1 := <-cc.NewSubConnCh + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.Close() + close(closeCh) + + select { + case <-cc.NewPickerCh: + t.Fatalf("unexpected picker update after balancer is closed") + case <-time.After(defaultShortTestTimeout): + } +} + +// TestClusterNameInAddressAttributes covers the case that cluster name is +// attached to the subconn address attributes. 
+func (s) TestClusterNameInAddressAttributes(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) + xdsC := fakeclient.NewClient() + + builder := balancer.Get(Name) + cc := testutils.NewTestClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + defer b.Close() + + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + sc1 := <-cc.NewSubConnCh + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // This should get the connecting picker. + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) + } + + addrs1 := <-cc.NewSubConnAddrsCh + if got, want := addrs1[0].Addr, testBackendAddrs[0].Addr; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + cn, ok := internal.GetXDSHandshakeClusterName(addrs1[0].Attributes) + if !ok || cn != testClusterName { + t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn, ok, testClusterName) + } + + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // Test pick with one backend. 
+ if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) + } + + const testClusterName2 = "test-cluster-2" + var addr2 = resolver.Address{Addr: "2.2.2.2"} + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: []resolver.Address{addr2}}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName2, + EDSServiceName: testServiceName, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + addrs2 := <-cc.NewSubConnAddrsCh + if got, want := addrs2[0].Addr, addr2.Addr; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + // New addresses should have the new cluster name. + cn2, ok := internal.GetXDSHandshakeClusterName(addrs2[0].Attributes) + if !ok || cn2 != testClusterName2 { + t.Fatalf("sc is created with addr with cluster name %v, %v, want cluster name %v", cn2, ok, testClusterName2) + } +} + +// TestReResolution verifies that when a SubConn turns transient failure, +// re-resolution is triggered. 
+func (s) TestReResolution(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + defer xdsclient.ClearCounterForTesting(testClusterName, testServiceName) + xdsC := fakeclient.NewClient() + + builder := balancer.Get(Name) + cc := testutils.NewTestClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + defer b.Close() + + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: testBackendAddrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + sc1 := <-cc.NewSubConnCh + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // This should get the connecting picker. + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) + } + + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + // This should get the transient failure picker. + if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatal(err.Error()) + } + + // The transient failure should trigger a re-resolution. + select { + case <-cc.ResolveNowCh: + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for ResolveNow()") + } + + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // Test pick with one backend. + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) + } + + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + // This should get the transient failure picker. 
+ if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatal(err.Error()) + } + + // The transient failure should trigger a re-resolution. + select { + case <-cc.ResolveNowCh: + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for ResolveNow()") + } +} + +func (s) TestLoadReporting(t *testing.T) { + var testLocality = xdsinternal.LocalityID{ + Region: "test-region", + Zone: "test-zone", + SubZone: "test-sub-zone", + } + + xdsC := fakeclient.NewClient() + + builder := balancer.Get(Name) + cc := testutils.NewTestClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + defer b.Close() + + addrs := make([]resolver.Address, len(testBackendAddrs)) + for i, a := range testBackendAddrs { + addrs[i] = xdsinternal.SetLocalityID(a, testLocality) + } + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig, + // Locality: testLocality, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + got, err := xdsC.WaitForReportLoad(ctx) + if err != nil { + t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) + } + if got.Server != testLRSServerConfig { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerConfig) + } + + sc1 := <-cc.NewSubConnCh + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // This should get the connecting picker. 
+ if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) + } + + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // Test pick with one backend. + const successCount = 5 + const errorCount = 5 + if err := cc.WaitForPicker(ctx, func(p balancer.Picker) error { + for i := 0; i < successCount; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + return fmt.Errorf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + gotSCSt.Done(balancer.DoneInfo{}) + } + for i := 0; i < errorCount; i++ { + gotSCSt, err := p.Pick(balancer.PickInfo{}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + return fmt.Errorf("picker.Pick, got %v, %v, want SubConn=%v", gotSCSt, err, sc1) + } + gotSCSt.Done(balancer.DoneInfo{Err: fmt.Errorf("error")}) + } + return nil + }); err != nil { + t.Fatal(err.Error()) + } + + // Dump load data from the store and compare with expected counts. 
+ loadStore := xdsC.LoadStore() + if loadStore == nil { + t.Fatal("loadStore is nil in xdsClient") + } + sds := loadStore.Stats([]string{testClusterName}) + if len(sds) == 0 { + t.Fatalf("loads for cluster %v not found in store", testClusterName) + } + sd := sds[0] + if sd.Cluster != testClusterName || sd.Service != testServiceName { + t.Fatalf("got unexpected load for %q, %q, want %q, %q", sd.Cluster, sd.Service, testClusterName, testServiceName) + } + testLocalityJSON, _ := testLocality.ToString() + localityData, ok := sd.LocalityStats[testLocalityJSON] + if !ok { + t.Fatalf("loads for %v not found in store", testLocality) + } + reqStats := localityData.RequestStats + if reqStats.Succeeded != successCount { + t.Errorf("got succeeded %v, want %v", reqStats.Succeeded, successCount) + } + if reqStats.Errored != errorCount { + t.Errorf("got errord %v, want %v", reqStats.Errored, errorCount) + } + if reqStats.InProgress != 0 { + t.Errorf("got inProgress %v, want %v", reqStats.InProgress, 0) + } + + b.Close() + if err := xdsC.WaitForCancelReportLoad(ctx); err != nil { + t.Fatalf("unexpected error waiting form load report to be canceled: %v", err) + } +} + +// TestUpdateLRSServer covers the cases +// - the init config specifies "" as the LRS server +// - config modifies LRS server to a different string +// - config sets LRS server to nil to stop load reporting +func (s) TestUpdateLRSServer(t *testing.T) { + var testLocality = xdsinternal.LocalityID{ + Region: "test-region", + Zone: "test-zone", + SubZone: "test-sub-zone", + } + + xdsC := fakeclient.NewClient() + + builder := balancer.Get(Name) + cc := testutils.NewTestClientConn(t) + b := builder.Build(cc, balancer.BuildOptions{}) + defer b.Close() + + addrs := make([]resolver.Address, len(testBackendAddrs)) + for i, a := range testBackendAddrs { + addrs[i] = xdsinternal.SetLocalityID(a, testLocality) + } + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: 
xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + got, err := xdsC.WaitForReportLoad(ctx) + if err != nil { + t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) + } + if got.Server != testLRSServerConfig { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerConfig) + } + + testLRSServerConfig2 := &bootstrap.ServerConfig{ + ServerURI: "trafficdirector-another.googleapis.com:443", + Creds: bootstrap.ChannelCreds{ + Type: "google_default", + }, + } + // Update LRS server to a different name. + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + LoadReportingServer: testLRSServerConfig2, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + if err := xdsC.WaitForCancelReportLoad(ctx); err != nil { + t.Fatalf("unexpected error waiting form load report to be canceled: %v", err) + } + got2, err2 := xdsC.WaitForReportLoad(ctx) + if err2 != nil { + t.Fatalf("xdsClient.ReportLoad failed with error: %v", err2) + } + if got2.Server != testLRSServerConfig2 { + t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got2.Server, testLRSServerConfig2) + } + + // Update LRS server to nil, to disable LRS. 
+ if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{Addresses: addrs}, xdsC), + BalancerConfig: &LBConfig{ + Cluster: testClusterName, + EDSServiceName: testServiceName, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }); err != nil { + t.Fatalf("unexpected error from UpdateClientConnState: %v", err) + } + if err := xdsC.WaitForCancelReportLoad(ctx); err != nil { + t.Fatalf("unexpected error waiting form load report to be canceled: %v", err) + } + + shortCtx, shortCancel := context.WithTimeout(context.Background(), defaultShortTestTimeout) + defer shortCancel() + if s, err := xdsC.WaitForReportLoad(shortCtx); err != context.DeadlineExceeded { + t.Fatalf("unexpected load report to server: %q", s) + } +} + +func assertString(f func() (string, error)) string { + s, err := f() + if err != nil { + panic(err.Error()) + } + return s +} diff --git a/xds/internal/balancer/clusterimpl/clusterimpl.go b/xds/internal/balancer/clusterimpl/clusterimpl.go index 4e4af5a02b45..d316d6a62a91 100644 --- a/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -26,107 +26,135 @@ package clusterimpl import ( "encoding/json" "fmt" + "sync" + "sync/atomic" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + xdsinternal "google.golang.org/grpc/xds/internal" "google.golang.org/grpc/xds/internal/balancer/loadstore" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient" + 
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) const ( - clusterImplName = "xds_cluster_impl_experimental" + // Name is the name of the cluster_impl balancer. + Name = "xds_cluster_impl_experimental" defaultRequestCountMax = 1024 ) func init() { - balancer.Register(clusterImplBB{}) + balancer.Register(bb{}) } -var newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } +type bb struct{} -type clusterImplBB struct{} - -func (clusterImplBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &clusterImplBalancer{ ClientConn: cc, bOpts: bOpts, closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), loadWrapper: loadstore.NewWrapper(), + scWrappers: make(map[balancer.SubConn]*scWrapper), pickerUpdateCh: buffer.NewUnbounded(), requestCountMax: defaultRequestCountMax, } b.logger = prefixLogger(b) - - client, err := newXDSClient() - if err != nil { - b.logger.Errorf("failed to create xds-client: %v", err) - return nil - } - b.xdsC = client + b.child = gracefulswitch.NewBalancer(b, bOpts) go b.run() - b.logger.Infof("Created") return b } -func (clusterImplBB) Name() string { - return clusterImplName +func (bb) Name() string { + return Name } -func (clusterImplBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { return parseConfig(c) } -// xdsClientInterface contains only the xds_client methods needed by LRS -// balancer. It's defined so we can override xdsclient in tests. 
-type xdsClientInterface interface { - ReportLoad(server string) (*load.Store, func()) - Close() -} - type clusterImplBalancer struct { balancer.ClientConn - bOpts balancer.BuildOptions + + // mu guarantees mutual exclusion between Close() and handling of picker + // update to the parent ClientConn in run(). It's to make sure that the + // run() goroutine doesn't send picker update to parent after the balancer + // is closed. + // + // It's only used by the run() goroutine, but not the other exported + // functions. Because the exported functions are guaranteed to be + // synchronized with Close(). + mu sync.Mutex closed *grpcsync.Event - logger *grpclog.PrefixLogger - xdsC xdsClientInterface + done *grpcsync.Event - config *lbConfig - childLB balancer.Balancer + bOpts balancer.BuildOptions + logger *grpclog.PrefixLogger + xdsClient xdsclient.XDSClient + + config *LBConfig + child *gracefulswitch.Balancer cancelLoadReport func() - clusterName string edsServiceName string - lrsServerName string + lrsServer *bootstrap.ServerConfig loadWrapper *loadstore.Wrapper - // childState/drops/requestCounter can only be accessed in run(). And run() - // is the only goroutine that sends picker to the parent ClientConn. All + clusterNameMu sync.Mutex + clusterName string + + scWrappersMu sync.Mutex + // The SubConns passed to the child policy are wrapped in a wrapper, to keep + // locality ID. But when the parent ClientConn sends updates, it's going to + // give the original SubConn, not the wrapper. But the child policies only + // know about the wrapper, so when forwarding SubConn updates, they must be + // sent for the wrappers. + // + // This keeps a map from original SubConn to wrapper, so that when + // forwarding the SubConn state update, the child policy will get the + // wrappers. + scWrappers map[balancer.SubConn]*scWrapper + + // childState/drops/requestCounter keeps the state used by the most recently + // generated picker. All fields can only be accessed in run(). 
And run() is + // the only goroutine that sends picker to the parent ClientConn. All // requests to update picker need to be sent to pickerUpdateCh. - childState balancer.State - drops []*dropper - requestCounter *xdsclient.ServiceRequestsCounter - requestCountMax uint32 - pickerUpdateCh *buffer.Unbounded + childState balancer.State + dropCategories []DropConfig // The categories for drops. + drops []*dropper + requestCounterCluster string // The cluster name for the request counter. + requestCounterService string // The service name for the request counter. + requestCounter *xdsclient.ClusterRequestsCounter + requestCountMax uint32 + pickerUpdateCh *buffer.Unbounded } // updateLoadStore checks the config for load store, and decides whether it // needs to restart the load reporting stream. -func (cib *clusterImplBalancer) updateLoadStore(newConfig *lbConfig) error { +func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { var updateLoadClusterAndService bool // ClusterName is different, restart. ClusterName is from ClusterName and - // EdsServiceName. - if cib.clusterName != newConfig.Cluster { + // EDSServiceName. + clusterName := b.getClusterName() + if clusterName != newConfig.Cluster { updateLoadClusterAndService = true - cib.clusterName = newConfig.Cluster + b.setClusterName(newConfig.Cluster) + clusterName = newConfig.Cluster } - if cib.edsServiceName != newConfig.EDSServiceName { + if b.edsServiceName != newConfig.EDSServiceName { updateLoadClusterAndService = true - cib.edsServiceName = newConfig.EDSServiceName + b.edsServiceName = newConfig.EDSServiceName } if updateLoadClusterAndService { // This updates the clusterName and serviceName that will be reported @@ -137,39 +165,66 @@ func (cib *clusterImplBalancer) updateLoadStore(newConfig *lbConfig) error { // On the other hand, this will almost never happen. Each LRS policy // shouldn't get updated config. 
The parent should do a graceful switch // when the clusterName or serviceName is changed. - cib.loadWrapper.UpdateClusterAndService(cib.clusterName, cib.edsServiceName) + b.loadWrapper.UpdateClusterAndService(clusterName, b.edsServiceName) } + var ( + stopOldLoadReport bool + startNewLoadReport bool + ) + // Check if it's necessary to restart load report. - var newLRSServerName string - if newConfig.LRSLoadReportingServerName != nil { - newLRSServerName = *newConfig.LRSLoadReportingServerName + if b.lrsServer == nil { + if newConfig.LoadReportingServer != nil { + // Old is nil, new is not nil, start new LRS. + b.lrsServer = newConfig.LoadReportingServer + startNewLoadReport = true + } + // Old is nil, new is nil, do nothing. + } else if newConfig.LoadReportingServer == nil { + // Old is not nil, new is nil, stop old, don't start new. + b.lrsServer = newConfig.LoadReportingServer + stopOldLoadReport = true + } else { + // Old is not nil, new is not nil, compare string values, if + // different, stop old and start new. + if !b.lrsServer.Equal(newConfig.LoadReportingServer) { + b.lrsServer = newConfig.LoadReportingServer + stopOldLoadReport = true + startNewLoadReport = true + } } - if cib.lrsServerName != newLRSServerName { - // LrsLoadReportingServerName is different, load should be report to a - // different server, restart. - cib.lrsServerName = newLRSServerName - if cib.cancelLoadReport != nil { - cib.cancelLoadReport() - cib.cancelLoadReport = nil + + if stopOldLoadReport { + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + if !startNewLoadReport { + // If a new LRS stream will be started later, no need to update + // it to nil here. 
+ b.loadWrapper.UpdateLoadStore(nil) + } } + } + if startNewLoadReport { var loadStore *load.Store - if cib.xdsC != nil { - loadStore, cib.cancelLoadReport = cib.xdsC.ReportLoad(cib.lrsServerName) + if b.xdsClient != nil { + loadStore, b.cancelLoadReport = b.xdsClient.ReportLoad(b.lrsServer) } - cib.loadWrapper.UpdateLoadStore(loadStore) + b.loadWrapper.UpdateLoadStore(loadStore) } return nil } -func (cib *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - if cib.closed.HasFired() { - cib.logger.Warningf("xds: received ClientConnState {%+v} after clusterImplBalancer was closed", s) +func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + if b.closed.HasFired() { + b.logger.Warningf("xds: received ClientConnState {%+v} after clusterImplBalancer was closed", s) return nil } - newConfig, ok := s.BalancerConfig.(*lbConfig) + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } @@ -182,147 +237,283 @@ func (cib *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState return fmt.Errorf("balancer %q not registered", newConfig.ChildPolicy.Name) } + if b.xdsClient == nil { + c := xdsclient.FromResolverState(s.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.xdsClient = c + } + // Update load reporting config. This needs to be done before updating the // child policy because we need the loadStore from the updated client to be // passed to the ccWrapper, so that the next picker from the child policy // will pick up the new loadStore. - if err := cib.updateLoadStore(newConfig); err != nil { + if err := b.updateLoadStore(newConfig); err != nil { return err } - // Compare new drop config. And update picker if it's changed. 
- var updatePicker bool - if cib.config == nil || !equalDropCategories(cib.config.DropCategories, newConfig.DropCategories) { - cib.drops = make([]*dropper, 0, len(newConfig.DropCategories)) - for _, c := range newConfig.DropCategories { - cib.drops = append(cib.drops, newDropper(c)) - } - updatePicker = true - } - - // Compare cluster name. And update picker if it's changed, because circuit - // breaking's stream counter will be different. - if cib.config == nil || cib.config.Cluster != newConfig.Cluster { - cib.requestCounter = xdsclient.GetServiceRequestsCounter(newConfig.Cluster) - updatePicker = true - } - // Compare upper bound of stream count. And update picker if it's changed. - // This is also for circuit breaking. - var newRequestCountMax uint32 = 1024 - if newConfig.MaxConcurrentRequests != nil { - newRequestCountMax = *newConfig.MaxConcurrentRequests - } - if cib.requestCountMax != newRequestCountMax { - cib.requestCountMax = newRequestCountMax - updatePicker = true - } - - if updatePicker { - cib.pickerUpdateCh.Put(&dropConfigs{ - drops: cib.drops, - requestCounter: cib.requestCounter, - requestCountMax: cib.requestCountMax, - }) - } - - // If child policy is a different type, recreate the sub-balancer. - if cib.config == nil || cib.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { - if cib.childLB != nil { - cib.childLB.Close() + if b.config == nil || b.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { + if err := b.child.SwitchTo(bb); err != nil { + return fmt.Errorf("error switching to child of type %q: %v", newConfig.ChildPolicy.Name, err) } - cib.childLB = bb.Build(cib, cib.bOpts) } - cib.config = newConfig + b.config = newConfig - if cib.childLB == nil { - // This is not an expected situation, and should be super rare in - // practice. - // - // When this happens, we already applied all the other configurations - // (drop/circuit breaking), but there's no child policy. 
This balancer - // will be stuck, and we report the error to the parent. - return fmt.Errorf("child policy is nil, this means balancer %q's Build() returned nil", newConfig.ChildPolicy.Name) - } + // Notify run() of this new config, in case drop and request counter need + // update (which means a new picker needs to be generated). + b.pickerUpdateCh.Put(newConfig) // Addresses and sub-balancer config are sent to sub-balancer. - return cib.childLB.UpdateClientConnState(balancer.ClientConnState{ + return b.child.UpdateClientConnState(balancer.ClientConnState{ ResolverState: s.ResolverState, - BalancerConfig: cib.config.ChildPolicy.Config, + BalancerConfig: b.config.ChildPolicy.Config, }) } -func (cib *clusterImplBalancer) ResolverError(err error) { - if cib.closed.HasFired() { - cib.logger.Warningf("xds: received resolver error {%+v} after clusterImplBalancer was closed", err) +func (b *clusterImplBalancer) ResolverError(err error) { + if b.closed.HasFired() { + b.logger.Warningf("xds: received resolver error {%+v} after clusterImplBalancer was closed", err) return } - - if cib.childLB != nil { - cib.childLB.ResolverError(err) - } + b.child.ResolverError(err) } -func (cib *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { - if cib.closed.HasFired() { - cib.logger.Warningf("xds: received subconn state change {%+v, %+v} after clusterImplBalancer was closed", sc, s) +func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + if b.closed.HasFired() { + b.logger.Warningf("xds: received subconn state change {%+v, %+v} after clusterImplBalancer was closed", sc, s) return } - if cib.childLB != nil { - cib.childLB.UpdateSubConnState(sc, s) + // Trigger re-resolution when a SubConn turns transient failure. This is + // necessary for the LogicalDNS in cluster_resolver policy to re-resolve. 
+ // + // Note that this happens not only for the addresses from DNS, but also for + // EDS (cluster_impl doesn't know if it's DNS or EDS, only the parent + // knows). The parent priority policy is configured to ignore re-resolution + // signal from the EDS children. + if s.ConnectivityState == connectivity.TransientFailure { + b.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) } -} -func (cib *clusterImplBalancer) Close() { - if cib.childLB != nil { - cib.childLB.Close() - cib.childLB = nil + b.scWrappersMu.Lock() + if scw, ok := b.scWrappers[sc]; ok { + sc = scw + if s.ConnectivityState == connectivity.Shutdown { + // Remove this SubConn from the map on Shutdown. + delete(b.scWrappers, scw.SubConn) + } } - cib.xdsC.Close() - cib.closed.Fire() - cib.logger.Infof("Shutdown") + b.scWrappersMu.Unlock() + b.child.UpdateSubConnState(sc, s) +} + +func (b *clusterImplBalancer) Close() { + b.mu.Lock() + b.closed.Fire() + b.mu.Unlock() + + b.child.Close() + b.childState = balancer.State{} + b.pickerUpdateCh.Close() + <-b.done.Done() + b.logger.Infof("Shutdown") +} + +func (b *clusterImplBalancer) ExitIdle() { + b.child.ExitIdle() } // Override methods to accept updates from the child LB. -func (cib *clusterImplBalancer) UpdateState(state balancer.State) { +func (b *clusterImplBalancer) UpdateState(state balancer.State) { // Instead of updating parent ClientConn inline, send state to run(). - cib.pickerUpdateCh.Put(state) + b.pickerUpdateCh.Put(state) +} + +func (b *clusterImplBalancer) setClusterName(n string) { + b.clusterNameMu.Lock() + defer b.clusterNameMu.Unlock() + b.clusterName = n +} + +func (b *clusterImplBalancer) getClusterName() string { + b.clusterNameMu.Lock() + defer b.clusterNameMu.Unlock() + return b.clusterName +} + +// scWrapper is a wrapper of SubConn with locality ID. The locality ID can be +// retrieved from the addresses when creating SubConn. 
+// +// All SubConns passed to the child policies are wrapped in this, so that the +// picker can get the localityID from the picked SubConn, and do load reporting. +// +// After wrapping, all SubConns to and from the parent ClientConn (e.g. for +// SubConn state update, update/remove SubConn) must be the original SubConns. +// All SubConns to and from the child policy (NewSubConn, forwarding SubConn +// state update) must be the wrapper. The balancer keeps a map from the original +// SubConn to the wrapper for this purpose. +type scWrapper struct { + balancer.SubConn + // locality needs to be atomic because it can be updated while being read by + // the picker. + locality atomic.Value // type xdsinternal.LocalityID +} + +func (scw *scWrapper) updateLocalityID(lID xdsinternal.LocalityID) { + scw.locality.Store(lID) +} + +func (scw *scWrapper) localityID() xdsinternal.LocalityID { + lID, _ := scw.locality.Load().(xdsinternal.LocalityID) + return lID +} + +func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + clusterName := b.getClusterName() + newAddrs := make([]resolver.Address, len(addrs)) + var lID xdsinternal.LocalityID + for i, addr := range addrs { + newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) + lID = xdsinternal.GetLocalityID(newAddrs[i]) + } + sc, err := b.ClientConn.NewSubConn(newAddrs, opts) + if err != nil { + return nil, err + } + // Wrap this SubConn in a wrapper, and add it to the map. + b.scWrappersMu.Lock() + ret := &scWrapper{SubConn: sc} + ret.updateLocalityID(lID) + b.scWrappers[sc] = ret + b.scWrappersMu.Unlock() + return ret, nil +} + +func (b *clusterImplBalancer) RemoveSubConn(sc balancer.SubConn) { + scw, ok := sc.(*scWrapper) + if !ok { + b.ClientConn.RemoveSubConn(sc) + return + } + // Remove the original SubConn from the parent ClientConn. + // + // Note that we don't remove this SubConn from the scWrappers map. 
We will + // need it to forward the final SubConn state Shutdown to the child policy. + // + // This entry is kept in the map until it's state is changes to Shutdown, + // and will be deleted in UpdateSubConnState(). + b.ClientConn.RemoveSubConn(scw.SubConn) +} + +func (b *clusterImplBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + clusterName := b.getClusterName() + newAddrs := make([]resolver.Address, len(addrs)) + var lID xdsinternal.LocalityID + for i, addr := range addrs { + newAddrs[i] = internal.SetXDSHandshakeClusterName(addr, clusterName) + lID = xdsinternal.GetLocalityID(newAddrs[i]) + } + if scw, ok := sc.(*scWrapper); ok { + scw.updateLocalityID(lID) + // Need to get the original SubConn from the wrapper before calling + // parent ClientConn. + sc = scw.SubConn + } + b.ClientConn.UpdateAddresses(sc, newAddrs) } type dropConfigs struct { drops []*dropper - requestCounter *xdsclient.ServiceRequestsCounter + requestCounter *xdsclient.ClusterRequestsCounter requestCountMax uint32 } -func (cib *clusterImplBalancer) run() { +// handleDropAndRequestCount compares drop and request counter in newConfig with +// the one currently used by picker. It returns a new dropConfigs if a new +// picker needs to be generated, otherwise it returns nil. +func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dropConfigs { + // Compare new drop config. And update picker if it's changed. + var updatePicker bool + if !equalDropCategories(b.dropCategories, newConfig.DropCategories) { + b.dropCategories = newConfig.DropCategories + b.drops = make([]*dropper, 0, len(newConfig.DropCategories)) + for _, c := range newConfig.DropCategories { + b.drops = append(b.drops, newDropper(c)) + } + updatePicker = true + } + + // Compare cluster name. And update picker if it's changed, because circuit + // breaking's stream counter will be different. 
+ if b.requestCounterCluster != newConfig.Cluster || b.requestCounterService != newConfig.EDSServiceName { + b.requestCounterCluster = newConfig.Cluster + b.requestCounterService = newConfig.EDSServiceName + b.requestCounter = xdsclient.GetClusterRequestsCounter(newConfig.Cluster, newConfig.EDSServiceName) + updatePicker = true + } + // Compare upper bound of stream count. And update picker if it's changed. + // This is also for circuit breaking. + var newRequestCountMax uint32 = 1024 + if newConfig.MaxConcurrentRequests != nil { + newRequestCountMax = *newConfig.MaxConcurrentRequests + } + if b.requestCountMax != newRequestCountMax { + b.requestCountMax = newRequestCountMax + updatePicker = true + } + + if !updatePicker { + return nil + } + return &dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + } +} + +func (b *clusterImplBalancer) run() { + defer b.done.Fire() for { select { - case update := <-cib.pickerUpdateCh.Get(): - cib.pickerUpdateCh.Load() + case update, ok := <-b.pickerUpdateCh.Get(): + if !ok { + return + } + b.pickerUpdateCh.Load() + b.mu.Lock() + if b.closed.HasFired() { + b.mu.Unlock() + return + } switch u := update.(type) { case balancer.State: - cib.childState = u - cib.ClientConn.UpdateState(balancer.State{ - ConnectivityState: cib.childState.ConnectivityState, - Picker: newDropPicker(cib.childState, &dropConfigs{ - drops: cib.drops, - requestCounter: cib.requestCounter, - requestCountMax: cib.requestCountMax, - }, cib.loadWrapper), + b.childState = u + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: newPicker(b.childState, &dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + }, b.loadWrapper), }) - case *dropConfigs: - cib.drops = u.drops - cib.requestCounter = u.requestCounter - if cib.childState.Picker != nil { - cib.ClientConn.UpdateState(balancer.State{ - ConnectivityState: 
cib.childState.ConnectivityState, - Picker: newDropPicker(cib.childState, u, cib.loadWrapper), + case *LBConfig: + dc := b.handleDropAndRequestCount(u) + if dc != nil && b.childState.Picker != nil { + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: newPicker(b.childState, dc, b.loadWrapper), }) } } - case <-cib.closed.Done(): + b.mu.Unlock() + case <-b.closed.Done(): + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + } return } } diff --git a/xds/internal/balancer/clusterimpl/config.go b/xds/internal/balancer/clusterimpl/config.go index 548ab34bce4d..cfddc6fb2a1b 100644 --- a/xds/internal/balancer/clusterimpl/config.go +++ b/xds/internal/balancer/clusterimpl/config.go @@ -23,34 +23,38 @@ import ( internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) -type dropCategory struct { +// DropConfig contains the category, and drop ratio. +type DropConfig struct { Category string RequestsPerMillion uint32 } -// lbConfig is the balancer config for weighted_target. -type lbConfig struct { - serviceconfig.LoadBalancingConfig +// LBConfig is the balancer config for cluster_impl balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` - Cluster string - EDSServiceName string - LRSLoadReportingServerName *string - MaxConcurrentRequests *uint32 - DropCategories []dropCategory - ChildPolicy *internalserviceconfig.BalancerConfig + Cluster string `json:"cluster,omitempty"` + EDSServiceName string `json:"edsServiceName,omitempty"` + // LoadReportingServer is the LRS server to send load reports to. If not + // present, load reporting will be disabled. 
+ LoadReportingServer *bootstrap.ServerConfig `json:"lrsLoadReportingServer,omitempty"` + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + DropCategories []DropConfig `json:"dropCategories,omitempty"` + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` } -func parseConfig(c json.RawMessage) (*lbConfig, error) { - var cfg lbConfig +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, err } return &cfg, nil } -func equalDropCategories(a, b []dropCategory) bool { +func equalDropCategories(a, b []DropConfig) bool { if len(a) != len(b) { return false } diff --git a/xds/internal/balancer/clusterimpl/config_test.go b/xds/internal/balancer/clusterimpl/config_test.go index 89696981e2a0..b001b8fdf0a4 100644 --- a/xds/internal/balancer/clusterimpl/config_test.go +++ b/xds/internal/balancer/clusterimpl/config_test.go @@ -22,17 +22,22 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/balancer" _ "google.golang.org/grpc/balancer/roundrobin" + _ "google.golang.org/grpc/balancer/weightedtarget" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" ) const ( testJSONConfig = `{ "cluster": "test_cluster", "edsServiceName": "test-eds", - "lrsLoadReportingServerName": "lrs_server", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, "maxConcurrentRequests": 123, "dropCategories": [ { @@ -87,7 +92,7 @@ func TestParseConfig(t *testing.T) { tests := []struct { name string js string - want *lbConfig + want *LBConfig wantErr bool }{ { @@ -105,12 +110,12 @@ func TestParseConfig(t *testing.T) { { name: "OK", js: testJSONConfig, - want: 
&lbConfig{ - Cluster: "test_cluster", - EDSServiceName: "test-eds", - LRSLoadReportingServerName: newString("lrs_server"), - MaxConcurrentRequests: newUint32(123), - DropCategories: []dropCategory{ + want: &LBConfig{ + Cluster: "test_cluster", + EDSServiceName: "test-eds", + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(123), + DropCategories: []DropConfig{ {Category: "drop-1", RequestsPerMillion: 314}, {Category: "drop-2", RequestsPerMillion: 159}, }, @@ -128,17 +133,13 @@ func TestParseConfig(t *testing.T) { if (err != nil) != tt.wantErr { t.Fatalf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) } - if !cmp.Equal(got, tt.want) { + if !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(bootstrap.ServerConfig{}, "Creds")) { t.Errorf("parseConfig() got unexpected result, diff: %v", cmp.Diff(got, tt.want)) } }) } } -func newString(s string) *string { - return &s -} - func newUint32(i uint32) *uint32 { return &i } diff --git a/xds/internal/balancer/clusterimpl/picker.go b/xds/internal/balancer/clusterimpl/picker.go index 6e9d27911534..3f354424f28e 100644 --- a/xds/internal/balancer/clusterimpl/picker.go +++ b/xds/internal/balancer/clusterimpl/picker.go @@ -24,11 +24,15 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/wrr" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/load" + + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" ) -var newRandomWRR = wrr.NewRandom +// NewRandomWRR is used when calculating drops. It's exported so that tests can +// override it. 
+var NewRandomWRR = wrr.NewRandom const million = 1000000 @@ -47,8 +51,8 @@ func gcd(a, b uint32) uint32 { return a } -func newDropper(c dropCategory) *dropper { - w := newRandomWRR() +func newDropper(c DropConfig) *dropper { + w := NewRandomWRR() gcdv := gcd(c.RequestsPerMillion, million) // Return true for RequestPerMillion, false for the rest. w.Add(true, int64(c.RequestsPerMillion/gcdv)) @@ -64,21 +68,30 @@ func (d *dropper) drop() (ret bool) { return d.w.Next().(bool) } +const ( + serverLoadCPUName = "cpu_utilization" + serverLoadMemoryName = "mem_utilization" +) + // loadReporter wraps the methods from the loadStore that are used here. type loadReporter interface { + CallStarted(locality string) + CallFinished(locality string, err error) + CallServerLoad(locality, name string, val float64) CallDropped(locality string) } -type dropPicker struct { +// Picker implements RPC drop, circuit breaking drop and load reporting. +type picker struct { drops []*dropper s balancer.State loadStore loadReporter - counter *client.ServiceRequestsCounter + counter *xdsclient.ClusterRequestsCounter countMax uint32 } -func newDropPicker(s balancer.State, config *dropConfigs, loadStore load.PerClusterReporter) *dropPicker { - return &dropPicker{ +func newPicker(s balancer.State, config *dropConfigs, loadStore load.PerClusterReporter) *picker { + return &picker{ drops: config.drops, s: s, loadStore: loadStore, @@ -87,22 +100,22 @@ func newDropPicker(s balancer.State, config *dropConfigs, loadStore load.PerClus } } -func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // Don't drop unless the inner picker is READY. Similar to // https://github.com/grpc/grpc-go/issues/2622. 
- if d.s.ConnectivityState != connectivity.Ready { - return d.s.Picker.Pick(info) - } - - for _, dp := range d.drops { - if dp.drop() { - if d.loadStore != nil { - d.loadStore.CallDropped(dp.category) + if d.s.ConnectivityState == connectivity.Ready { + // Check if this RPC should be dropped by category. + for _, dp := range d.drops { + if dp.drop() { + if d.loadStore != nil { + d.loadStore.CallDropped(dp.category) + } + return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") } - return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") } } + // Check if this RPC should be dropped by circuit breaking. if d.counter != nil { if err := d.counter.StartRequest(d.countMax); err != nil { // Drops by circuit breaking are reported with empty category. They @@ -112,11 +125,58 @@ func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { } return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error()) } - pr, err := d.s.Picker.Pick(info) - if err != nil { + } + + var lIDStr string + pr, err := d.s.Picker.Pick(info) + if scw, ok := pr.SubConn.(*scWrapper); ok { + // This OK check also covers the case err!=nil, because SubConn will be + // nil. + pr.SubConn = scw.SubConn + var e error + // If locality ID isn't found in the wrapper, an empty locality ID will + // be used. + lIDStr, e = scw.localityID().ToString() + if e != nil { + logger.Infof("failed to marshal LocalityID: %#v, loads won't be reported", scw.localityID()) + } + } + + if err != nil { + if d.counter != nil { + // Release one request count if this pick fails. 
d.counter.EndRequest() - return pr, err } + return pr, err + } + + if d.loadStore != nil { + d.loadStore.CallStarted(lIDStr) + oldDone := pr.Done + pr.Done = func(info balancer.DoneInfo) { + if oldDone != nil { + oldDone(info) + } + d.loadStore.CallFinished(lIDStr, info.Err) + + load, ok := info.ServerLoad.(*v3orcapb.OrcaLoadReport) + if !ok || load == nil { + return + } + d.loadStore.CallServerLoad(lIDStr, serverLoadCPUName, load.CpuUtilization) + d.loadStore.CallServerLoad(lIDStr, serverLoadMemoryName, load.MemUtilization) + for n, c := range load.RequestCost { + d.loadStore.CallServerLoad(lIDStr, n, c) + } + for n, c := range load.Utilization { + d.loadStore.CallServerLoad(lIDStr, n, c) + } + } + } + + if d.counter != nil { + // Update Done() so that when the RPC finishes, the request count will + // be released. oldDone := pr.Done pr.Done = func(doneInfo balancer.DoneInfo) { d.counter.EndRequest() @@ -124,8 +184,7 @@ func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { oldDone(doneInfo) } } - return pr, err } - return d.s.Picker.Pick(info) + return pr, err } diff --git a/xds/internal/balancer/clusterimpl/tests/balancer_test.go b/xds/internal/balancer/clusterimpl/tests/balancer_test.go new file mode 100644 index 000000000000..1cb8492949a0 --- /dev/null +++ b/xds/internal/balancer/clusterimpl/tests/balancer_test.go @@ -0,0 +1,156 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package clusterimpl_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/status" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + + _ "google.golang.org/grpc/xds" +) + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 100 * time.Millisecond +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestConfigUpdateWithSameLoadReportingServerConfig tests the scenario where +// the clusterimpl LB policy receives a config update with no change in the load +// reporting server configuration. The test verifies that the existing load +// reporting stream is not terminated and that a new load reporting stream is not +// created. +func (s) TestConfigUpdateWithSameLoadReportingServerConfig(t *testing.T) { + // Create an xDS management server that serves ADS and LRS requests. + opts := e2e.ManagementServerOptions{SupportLoadReportingService: true} + mgmtServer, nodeID, _, resolver, mgmtServerCleanup := e2e.SetupManagementServer(t, opts) + defer mgmtServerCleanup() + + // Start a server backend exposing the test service. + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure the xDS management server with default resources. Override the + // default cluster to include an LRS server config pointing to self. 
+ const serviceName = "my-test-xds-service" + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: testutils.ParsePort(t, server.Address), + SecLevel: e2e.SecurityLevelNone, + }) + resources.Clusters[0].LrsServer = &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(fmt.Sprintf("xds:///%s", serviceName), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(resolver)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Ensure that an LRS stream is created. + if _, err := mgmtServer.LRSServer.LRSStreamOpenChan.Receive(ctx); err != nil { + t.Fatalf("Failure when waiting for an LRS stream to be opened: %v", err) + } + + // Configure a new resource on the management server with drop config that + // drops all RPCs, but with no change in the load reporting server config. 
+ resources.Endpoints = []*v3endpointpb.ClusterLoadAssignment{ + e2e.EndpointResourceWithOptions(e2e.EndpointOptions{ + ClusterName: "endpoints-" + serviceName, + Host: "localhost", + Localities: []e2e.LocalityOptions{ + { + Backends: []e2e.BackendOptions{{Port: testutils.ParsePort(t, server.Address)}}, + Weight: 1, + }, + }, + DropPercents: map[string]int{"test-drop-everything": 100}, + }), + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Repeatedly send RPCs until we see that they are getting dropped, or the + // test context deadline expires. The former indicates that new config with + // drops has been applied. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if err != nil && status.Code(err) == codes.Unavailable && strings.Contains(err.Error(), "RPC is dropped") { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for RPCs to be dropped after config update") + } + + // Ensure that the old LRS stream is not closed. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := mgmtServer.LRSServer.LRSStreamCloseChan.Receive(sCtx); err == nil { + t.Fatal("LRS stream closed when expected not to") + } + + // Also ensure that a new LRS stream is not created. 
+ sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := mgmtServer.LRSServer.LRSStreamOpenChan.Receive(sCtx); err == nil { + t.Fatal("New LRS stream created when expected not to") + } +} diff --git a/xds/internal/balancer/clustermanager/balancerstateaggregator.go b/xds/internal/balancer/clustermanager/balancerstateaggregator.go index 35eb86c3590c..4b971a3e241b 100644 --- a/xds/internal/balancer/clustermanager/balancerstateaggregator.go +++ b/xds/internal/balancer/clustermanager/balancerstateaggregator.go @@ -57,6 +57,11 @@ type balancerStateAggregator struct { // // If an ID is not in map, it's either removed or never added. idToPickerState map[string]*subBalancerState + // Set when UpdateState call propagation is paused. + pauseUpdateState bool + // Set when UpdateState call propagation is paused and an UpdateState call + // is suppressed. + needUpdateStateOnResume bool } func newBalancerStateAggregator(cc balancer.ClientConn, logger *grpclog.PrefixLogger) *balancerStateAggregator { @@ -118,6 +123,27 @@ func (bsa *balancerStateAggregator) remove(id string) { delete(bsa.idToPickerState, id) } +// pauseStateUpdates causes UpdateState calls to not propagate to the parent +// ClientConn. The last state will be remembered and propagated when +// ResumeStateUpdates is called. +func (bsa *balancerStateAggregator) pauseStateUpdates() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.pauseUpdateState = true + bsa.needUpdateStateOnResume = false +} + +// resumeStateUpdates will resume propagating UpdateState calls to the parent, +// and call UpdateState on the parent if any UpdateState call was suppressed. +func (bsa *balancerStateAggregator) resumeStateUpdates() { + bsa.mu.Lock() + defer bsa.mu.Unlock() + bsa.pauseUpdateState = false + if bsa.needUpdateStateOnResume { + bsa.cc.UpdateState(bsa.build()) + } +} + // UpdateState is called to report a balancer state change from sub-balancer. 
// It's usually called by the balancer group. // @@ -143,6 +169,12 @@ func (bsa *balancerStateAggregator) UpdateState(id string, state balancer.State) if !bsa.started { return } + if bsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + bsa.needUpdateStateOnResume = true + return + } bsa.cc.UpdateState(bsa.build()) } @@ -168,6 +200,12 @@ func (bsa *balancerStateAggregator) buildAndUpdate() { if !bsa.started { return } + if bsa.pauseUpdateState { + // If updates are paused, do not call UpdateState, but remember that we + // need to call it when they are resumed. + bsa.needUpdateStateOnResume = true + return + } bsa.cc.UpdateState(bsa.build()) } @@ -183,13 +221,18 @@ func (bsa *balancerStateAggregator) build() balancer.State { // handling the special connecting after ready, as in UpdateState(). Then a // function to calculate the aggregated connectivity state as in this // function. - var readyN, connectingN int + // + // TODO: use balancer.ConnectivityStateEvaluator to calculate the aggregated + // state. 
+ var readyN, connectingN, idleN int for _, ps := range bsa.idToPickerState { switch ps.stateToAggregate { case connectivity.Ready: readyN++ case connectivity.Connecting: connectingN++ + case connectivity.Idle: + idleN++ } } var aggregatedState connectivity.State @@ -198,6 +241,8 @@ func (bsa *balancerStateAggregator) build() balancer.State { aggregatedState = connectivity.Ready case connectingN > 0: aggregatedState = connectivity.Connecting + case idleN > 0: + aggregatedState = connectivity.Idle default: aggregatedState = connectivity.TransientFailure } diff --git a/xds/internal/balancer/clustermanager/clustermanager.go b/xds/internal/balancer/clustermanager/clustermanager.go index 1e4dee7f5d3a..6ac7a39b2b4c 100644 --- a/xds/internal/balancer/clustermanager/clustermanager.go +++ b/xds/internal/balancer/clustermanager/clustermanager.go @@ -25,37 +25,38 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/balancergroup" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" ) const balancerName = "xds_cluster_manager_experimental" func init() { - balancer.Register(builder{}) + balancer.Register(bb{}) } -type builder struct{} +type bb struct{} -func (builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { b := &bal{} b.logger = prefixLogger(b) b.stateAggregator = newBalancerStateAggregator(cc, b.logger) b.stateAggregator.start() - b.bg = balancergroup.New(cc, opts, b.stateAggregator, nil, b.logger) + b.bg = balancergroup.New(cc, opts, b.stateAggregator, b.logger) b.bg.Start() b.logger.Infof("Created") return b } -func (builder) Name() string { +func (bb) Name() string 
{ return balancerName } -func (builder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { return parseConfig(c) } @@ -92,6 +93,11 @@ func (b *bal) updateChildren(s balancer.ClientConnState, newConfig *lbConfig) { b.stateAggregator.add(name) // Then add to the balancer group. b.bg.Add(name, balancer.Get(newT.ChildPolicy.Name)) + } else { + // Already present, check for type change and if so send down a new builder. + if newT.ChildPolicy.Name != b.children[name].ChildPolicy.Name { + b.bg.UpdateBuilder(name, balancer.Get(newT.ChildPolicy.Name)) + } } // TODO: handle error? How to aggregate errors and return? _ = b.bg.UpdateClientConnState(name, balancer.ClientConnState{ @@ -115,8 +121,10 @@ func (b *bal) UpdateClientConnState(s balancer.ClientConnState) error { if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } - b.logger.Infof("update with config %+v, resolver state %+v", s.BalancerConfig, s.ResolverState) + b.logger.Infof("update with config %+v, resolver state %+v", pretty.ToJSON(s.BalancerConfig), s.ResolverState) + b.stateAggregator.pauseStateUpdates() + defer b.stateAggregator.resumeStateUpdates() b.updateChildren(s, newConfig) return nil } @@ -132,6 +140,11 @@ func (b *bal) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnStat func (b *bal) Close() { b.stateAggregator.close() b.bg.Close() + b.logger.Infof("Shutdown") +} + +func (b *bal) ExitIdle() { + b.bg.ExitIdle() } const prefix = "[xds-cluster-manager-lb %p] " diff --git a/xds/internal/balancer/clustermanager/clustermanager_test.go b/xds/internal/balancer/clustermanager/clustermanager_test.go index a40d954ad64f..7d5966339444 100644 --- a/xds/internal/balancer/clustermanager/clustermanager_test.go +++ b/xds/internal/balancer/clustermanager/clustermanager_test.go @@ -25,19 +25,20 @@ import ( "time" "github.com/google/go-cmp/cmp" + 
"google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/hierarchy" - itestutils "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/testutils" ) type s struct { @@ -48,6 +49,11 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + var ( rtBuilder balancer.Builder rtParser balancer.ConfigParser @@ -85,7 +91,7 @@ type ignoreAttrsRRBalancer struct { func (trrb *ignoreAttrsRRBalancer) UpdateClientConnState(s balancer.ClientConnState) error { var newAddrs []resolver.Address for _, a := range s.ResolverState.Addresses { - a.Attributes = nil + a.BalancerAttributes = nil newAddrs = append(newAddrs, a) } s.ResolverState.Addresses = newAddrs @@ -102,6 +108,7 @@ func init() { rtParser = rtBuilder.(balancer.ConfigParser) balancer.Register(&ignoreAttrsRRBuilder{balancer.Get(roundrobin.Name)}) + balancer.Register(wrappedPickFirstBalancerBuilder{}) balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond } @@ -137,8 +144,8 @@ func TestClusterPicks(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. 
wantAddrs := []resolver.Address{ - {Addr: testBackendAddrStrs[0], Attributes: nil}, - {Addr: testBackendAddrStrs[1], Attributes: nil}, + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, } if err := rtb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: []resolver.Address{ @@ -156,11 +163,11 @@ func TestClusterPicks(t *testing.T) { for range wantAddrs { addrs := <-cc.NewSubConnAddrsCh if len(hierarchy.Get(addrs[0])) != 0 { - t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes) + t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes) } sc := <-cc.NewSubConnCh // Clear the attributes before adding to map. - addrs[0].Attributes = nil + addrs[0].BalancerAttributes = nil m1[addrs[0]] = sc rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -215,8 +222,8 @@ func TestConfigUpdateAddCluster(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. 
wantAddrs := []resolver.Address{ - {Addr: testBackendAddrStrs[0], Attributes: nil}, - {Addr: testBackendAddrStrs[1], Attributes: nil}, + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, } if err := rtb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: []resolver.Address{ @@ -234,11 +241,11 @@ func TestConfigUpdateAddCluster(t *testing.T) { for range wantAddrs { addrs := <-cc.NewSubConnAddrsCh if len(hierarchy.Get(addrs[0])) != 0 { - t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes) + t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes) } sc := <-cc.NewSubConnCh // Clear the attributes before adding to map. - addrs[0].Attributes = nil + addrs[0].BalancerAttributes = nil m1[addrs[0]] = sc rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -285,7 +292,7 @@ func TestConfigUpdateAddCluster(t *testing.T) { if err != nil { t.Fatalf("failed to parse balancer config: %v", err) } - wantAddrs = append(wantAddrs, resolver.Address{Addr: testBackendAddrStrs[2], Attributes: nil}) + wantAddrs = append(wantAddrs, resolver.Address{Addr: testBackendAddrStrs[2], BalancerAttributes: nil}) if err := rtb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: []resolver.Address{ hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), @@ -300,11 +307,11 @@ func TestConfigUpdateAddCluster(t *testing.T) { // Expect exactly one new subconn. 
addrs := <-cc.NewSubConnAddrsCh if len(hierarchy.Get(addrs[0])) != 0 { - t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes) + t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes) } sc := <-cc.NewSubConnCh // Clear the attributes before adding to map. - addrs[0].Attributes = nil + addrs[0].BalancerAttributes = nil m1[addrs[0]] = sc rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -372,8 +379,8 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { // Send the config, and an address with hierarchy path ["cluster_1"]. wantAddrs := []resolver.Address{ - {Addr: testBackendAddrStrs[0], Attributes: nil}, - {Addr: testBackendAddrStrs[1], Attributes: nil}, + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, } if err := rtb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{Addresses: []resolver.Address{ @@ -391,11 +398,11 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { for range wantAddrs { addrs := <-cc.NewSubConnAddrsCh if len(hierarchy.Get(addrs[0])) != 0 { - t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes) + t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes) } sc := <-cc.NewSubConnCh // Clear the attributes before adding to map. 
- addrs[0].Attributes = nil + addrs[0].BalancerAttributes = nil m1[addrs[0]] = sc rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -475,11 +482,11 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { for range wantAddrs { addrs := <-cc.NewSubConnAddrsCh if len(hierarchy.Get(addrs[0])) != 0 { - t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].Attributes) + t.Fatalf("NewSubConn with address %+v, attrs %+v, want address with hierarchy cleared", addrs[0], addrs[0].BalancerAttributes) } sc := <-cc.NewSubConnCh // Clear the attributes before adding to map. - addrs[0].Attributes = nil + addrs[0].BalancerAttributes = nil m2[addrs[0]] = sc rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) @@ -517,17 +524,16 @@ func TestRoutingConfigUpdateDeleteAll(t *testing.T) { func TestClusterManagerForwardsBalancerBuildOptions(t *testing.T) { const ( balancerName = "stubBalancer-TestClusterManagerForwardsBalancerBuildOptions" - parent = int64(1234) userAgent = "ua" defaultTestTimeout = 1 * time.Second ) // Setup the stub balancer such that we can read the build options passed to // it in the UpdateClientConnState method. 
- ccsCh := itestutils.NewChannel() + ccsCh := testutils.NewChannel() bOpts := balancer.BuildOptions{ DialCreds: insecure.NewCredentials(), - ChannelzParentID: parent, + ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefChannel, 1234, nil), CustomUserAgent: userAgent, } stub.Register(balancerName, stub.BalancerFuncs{ @@ -560,8 +566,256 @@ func TestClusterManagerForwardsBalancerBuildOptions(t *testing.T) { } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if v, err := ccsCh.Receive(ctx); err != nil { - err2 := v.(error) - t.Fatal(err2) + v, err := ccsCh.Receive(ctx) + if err != nil { + t.Fatalf("timed out waiting for UpdateClientConnState result: %v", err) + } + if v != nil { + t.Fatal(v) + } +} + +const initIdleBalancerName = "test-init-Idle-balancer" + +var errTestInitIdle = fmt.Errorf("init Idle balancer error 0") + +func init() { + stub.Register(initIdleBalancerName, stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error { + bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{}) + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + err := fmt.Errorf("wrong picker error") + if state.ConnectivityState == connectivity.Idle { + err = errTestInitIdle + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &testutils.TestConstPicker{Err: err}, + }) + }, + }) +} + +// TestInitialIdle covers the case that if the child reports Idle, the overall +// state will be Idle. 
+func TestInitialIdle(t *testing.T) { + cc := testutils.NewTestClientConn(t) + rtb := rtBuilder.Build(cc, balancer.BuildOptions{}) + + configJSON1 := `{ +"children": { + "cds:cluster_1":{ "childPolicy": [{"test-init-Idle-balancer":""}] } +} +}` + + config1, err := rtParser.ParseConfig([]byte(configJSON1)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the config, and an address with hierarchy path ["cluster_1"]. + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + } + if err := rtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), + }}, + BalancerConfig: config1, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Verify that a subconn is created with the address, and the hierarchy path + // in the address is cleared. + for range wantAddrs { + sc := <-cc.NewSubConnCh + rtb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + } + + if state1 := <-cc.NewStateCh; state1 != connectivity.Idle { + t.Fatalf("Received aggregated state: %v, want Idle", state1) + } +} + +// TestClusterGracefulSwitch tests the graceful switch functionality for a child +// of the cluster manager. At first, the child is configured as a round robin +// load balancer, and thus should behave accordingly. The test then gracefully +// switches this child to a pick first load balancer. Once that balancer updates +// its state and completes the graceful switch process the new picker should +// reflect this change. 
+func TestClusterGracefulSwitch(t *testing.T) { + cc := testutils.NewTestClientConn(t) + rtb := rtBuilder.Build(cc, balancer.BuildOptions{}) + + configJSON1 := `{ +"children": { + "csp:cluster":{ "childPolicy": [{"ignore_attrs_round_robin":""}] } +} +}` + config1, err := rtParser.ParseConfig([]byte(configJSON1)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + {Addr: testBackendAddrStrs[1], BalancerAttributes: nil}, + } + if err := rtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(wantAddrs[0], []string{"csp:cluster"}), + }}, + BalancerConfig: config1, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + sc1 := <-cc.NewSubConnCh + rtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + rtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p1 := <-cc.NewPickerCh + pi := balancer.PickInfo{ + Ctx: SetPickedCluster(context.Background(), "csp:cluster"), + } + testPick(t, p1, pi, sc1, nil) + + // Same cluster, different balancer type. + configJSON2 := `{ +"children": { + "csp:cluster":{ "childPolicy": [{"wrappedPickFirstBalancer":""}] } +} +}` + config2, err := rtParser.ParseConfig([]byte(configJSON2)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + if err := rtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(wantAddrs[1], []string{"csp:cluster"}), + }}, + BalancerConfig: config2, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + sc2 := <-cc.NewSubConnCh + // Update the pick first balancers SubConn as CONNECTING. 
This will cause + // the pick first balancer to UpdateState() with CONNECTING, which shouldn't send + // a Picker update back, as the Graceful Switch process is not complete. + rtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + select { + case <-cc.NewPickerCh: + t.Fatalf("No new picker should have been sent due to the Graceful Switch process not completing") + case <-ctx.Done(): + } + + // Update the pick first balancers SubConn as READY. This will cause + // the pick first balancer to UpdateState() with READY, which should send a + // Picker update back, as the Graceful Switch process is complete. This + // Picker should always pick the pick first's created SubConn. + rtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p2 := <-cc.NewPickerCh + testPick(t, p2, pi, sc2, nil) + // The Graceful Switch process completing for the child should cause the + // SubConns for the balancer being gracefully switched from to get deleted. + ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + select { + case <-ctx.Done(): + t.Fatalf("error waiting for RemoveSubConn()") + case rsc := <-cc.RemoveSubConnCh: + // The SubConn removed should have been the created SubConn + // from the child before switching. 
+ if rsc != sc1 { + t.Fatalf("RemoveSubConn() got: %v, want %v", rsc, sc1) + } + } +} + +type wrappedPickFirstBalancerBuilder struct{} + +func (wrappedPickFirstBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(grpc.PickFirstBalancerName) + wpfb := &wrappedPickFirstBalancer{ + ClientConn: cc, + } + pf := builder.Build(wpfb, opts) + wpfb.Balancer = pf + return wpfb +} + +func (wrappedPickFirstBalancerBuilder) Name() string { + return "wrappedPickFirstBalancer" +} + +type wrappedPickFirstBalancer struct { + balancer.Balancer + balancer.ClientConn +} + +func (wb *wrappedPickFirstBalancer) UpdateState(state balancer.State) { + // Eat it if IDLE - allows it to switch over only on a READY SubConn. + if state.ConnectivityState == connectivity.Idle { + return + } + wb.ClientConn.UpdateState(state) +} + +// tcc wraps a testutils.TestClientConn but stores all state transitions in a +// slice. +type tcc struct { + *testutils.TestClientConn + states []balancer.State +} + +func (t *tcc) UpdateState(bs balancer.State) { + t.states = append(t.states, bs) + t.TestClientConn.UpdateState(bs) +} + +func (s) TestUpdateStatePauses(t *testing.T) { + cc := &tcc{TestClientConn: testutils.NewTestClientConn(t)} + + balFuncs := stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, s balancer.ClientConnState) error { + bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: nil}) + bd.ClientConn.UpdateState(balancer.State{ConnectivityState: connectivity.Ready, Picker: nil}) + return nil + }, + } + stub.Register("update_state_balancer", balFuncs) + + rtb := rtBuilder.Build(cc, balancer.BuildOptions{}) + + configJSON1 := `{ +"children": { + "cds:cluster_1":{ "childPolicy": [{"update_state_balancer":""}] } +} +}` + + config1, err := rtParser.ParseConfig([]byte(configJSON1)) + if err != nil { + t.Fatalf("failed to parse balancer config: %v", err) + } + + // Send the 
config, and an address with hierarchy path ["cluster_1"]. + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0], BalancerAttributes: nil}, + } + if err := rtb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + hierarchy.Set(wantAddrs[0], []string{"cds:cluster_1"}), + }}, + BalancerConfig: config1, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Verify that the only state update is the second one called by the child. + if len(cc.states) != 1 || cc.states[0].ConnectivityState != connectivity.Ready { + t.Fatalf("cc.states = %v; want [connectivity.Ready]", cc.states) } } diff --git a/xds/internal/balancer/clustermanager/config_test.go b/xds/internal/balancer/clustermanager/config_test.go index 3328ba1d300f..23c25755ee30 100644 --- a/xds/internal/balancer/clustermanager/config_test.go +++ b/xds/internal/balancer/clustermanager/config_test.go @@ -23,9 +23,9 @@ import ( "github.com/google/go-cmp/cmp" "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/weightedtarget" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" - _ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" ) const ( diff --git a/xds/internal/balancer/clusterresolver/clusterresolver.go b/xds/internal/balancer/clusterresolver/clusterresolver.go new file mode 100644 index 000000000000..6faf81ab552c --- /dev/null +++ b/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -0,0 +1,409 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterresolver contains the implementation of the +// xds_cluster_resolver_experimental LB policy which resolves endpoint addresses +// using a list of one or more discovery mechanisms. +package clusterresolver + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/nop" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// Name is the name of the cluster_resolver balancer. +const Name = "cluster_resolver_experimental" + +var ( + errBalancerClosed = errors.New("cdsBalancer is closed") + newChildBalancer = func(bb balancer.Builder, cc balancer.ClientConn, o balancer.BuildOptions) balancer.Balancer { + return bb.Build(cc, o) + } +) + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +// Build helps implement the balancer.Builder interface. 
+func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + priorityBuilder := balancer.Get(priority.Name) + if priorityBuilder == nil { + logger.Errorf("%q LB policy is needed but not registered", priority.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", priority.Name)) + } + priorityConfigParser, ok := priorityBuilder.(balancer.ConfigParser) + if !ok { + logger.Errorf("%q LB policy does not implement a config parser", priority.Name) + return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", priority.Name)) + } + + b := &clusterResolverBalancer{ + bOpts: opts, + updateCh: buffer.NewUnbounded(), + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + + priorityBuilder: priorityBuilder, + priorityConfigParser: priorityConfigParser, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + + b.resourceWatcher = newResourceResolver(b, b.logger) + b.cc = &ccWrapper{ + ClientConn: cc, + resourceWatcher: b.resourceWatcher, + } + + go b.run() + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(j json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + odBuilder := balancer.Get(outlierdetection.Name) + if odBuilder == nil { + // Shouldn't happen, registered through imported Outlier Detection, + // defensive programming. + return nil, fmt.Errorf("%q LB policy is needed but not registered", outlierdetection.Name) + } + odParser, ok := odBuilder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported Outlier Detection builder has this method. 
+ return nil, fmt.Errorf("%q LB policy does not implement a config parser", outlierdetection.Name) + } + + var cfg *LBConfig + if err := json.Unmarshal(j, &cfg); err != nil { + return nil, fmt.Errorf("unable to unmarshal balancer config %s into cluster-resolver config, error: %v", string(j), err) + } + + if envconfig.XDSOutlierDetection { + for i, dm := range cfg.DiscoveryMechanisms { + lbCfg, err := odParser.ParseConfig(dm.OutlierDetection) + if err != nil { + return nil, fmt.Errorf("error parsing Outlier Detection config %v: %v", dm.OutlierDetection, err) + } + odCfg, ok := lbCfg.(*outlierdetection.LBConfig) + if !ok { + // Shouldn't happen, Parser built at build time with Outlier Detection + // builder pulled from gRPC LB Registry. + return nil, fmt.Errorf("odParser returned config with unexpected type %T: %v", lbCfg, lbCfg) + } + cfg.DiscoveryMechanisms[i].outlierDetection = *odCfg + } + } + if err := json.Unmarshal(cfg.XDSLBPolicy, &cfg.xdsLBPolicy); err != nil { + // This will never occur, valid configuration is emitted from the xDS + // Client. Validity is already checked in the xDS Client, however, this + // double validation is present because Unmarshalling and Validating are + // coupled into one json.Unmarshal operation). We will switch this in + // the future to two separate operations. + return nil, fmt.Errorf("error unmarshaling xDS LB Policy: %v", err) + } + return cfg, nil +} + +// ccUpdate wraps a clientConn update received from gRPC. +type ccUpdate struct { + state balancer.ClientConnState + err error +} + +// scUpdate wraps a subConn update received from gRPC. This is directly passed +// on to the child policy. +type scUpdate struct { + subConn balancer.SubConn + state balancer.SubConnState +} + +type exitIdle struct{} + +// clusterResolverBalancer resolves endpoint addresses using a list of one or +// more discovery mechanisms. 
+type clusterResolverBalancer struct { + cc balancer.ClientConn + bOpts balancer.BuildOptions + updateCh *buffer.Unbounded // Channel for updates from gRPC. + resourceWatcher *resourceResolver + logger *grpclog.PrefixLogger + closed *grpcsync.Event + done *grpcsync.Event + + priorityBuilder balancer.Builder + priorityConfigParser balancer.ConfigParser + + config *LBConfig + configRaw *serviceconfig.ParseResult + xdsClient xdsclient.XDSClient // xDS client to watch EDS resource. + attrsWithClient *attributes.Attributes // Attributes with xdsClient attached to be passed to the child policies. + + child balancer.Balancer + priorities []priorityConfig + watchUpdateReceived bool +} + +// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. +// +// A good update results in creation of endpoint resolvers for the configured +// discovery mechanisms. An update with an error results in cancellation of any +// existing endpoint resolution and propagation of the same to the child policy. +func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { + if err := update.err; err != nil { + b.handleErrorFromUpdate(err, true) + return + } + + b.logger.Infof("Received new balancer config: %v", pretty.ToJSON(update.state.BalancerConfig)) + cfg, _ := update.state.BalancerConfig.(*LBConfig) + if cfg == nil { + b.logger.Warningf("Ignoring unsupported balancer configuration of type: %T", update.state.BalancerConfig) + return + } + + b.config = cfg + b.configRaw = update.state.ResolverState.ServiceConfig + b.resourceWatcher.updateMechanisms(cfg.DiscoveryMechanisms) + + // The child policy is created only after all configured discovery + // mechanisms have been successfully returned endpoints. If that is not the + // case, we return early. + if !b.watchUpdateReceived { + return + } + b.updateChildConfig() +} + +// handleResourceUpdate handles a resource update or error from the resource +// resolver by propagating the same to the child LB policy. 
+func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { + if err := update.err; err != nil { + b.handleErrorFromUpdate(err, false) + return + } + + b.watchUpdateReceived = true + b.priorities = update.priorities + + // An update from the resource resolver contains resolved endpoint addresses + // for all configured discovery mechanisms ordered by priority. This is used + // to generate configuration for the priority LB policy. + b.updateChildConfig() +} + +// updateChildConfig builds child policy configuration using endpoint addresses +// returned by the resource resolver and child policy configuration provided by +// parent LB policy. +// +// A child policy is created if one doesn't already exist. The newly built +// configuration is then pushed to the child policy. +func (b *clusterResolverBalancer) updateChildConfig() { + if b.child == nil { + b.child = newChildBalancer(b.priorityBuilder, b.cc, b.bOpts) + } + + childCfgBytes, addrs, err := buildPriorityConfigJSON(b.priorities, &b.config.xdsLBPolicy) + if err != nil { + b.logger.Warningf("Failed to build child policy config: %v", err) + return + } + childCfg, err := b.priorityConfigParser.ParseConfig(childCfgBytes) + if err != nil { + b.logger.Warningf("Failed to parse child policy config. This should never happen because the config was generated: %v", err) + return + } + b.logger.Infof("Built child policy config: %v", pretty.ToJSON(childCfg)) + + if err := b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: addrs, + ServiceConfig: b.configRaw, + Attributes: b.attrsWithClient, + }, + BalancerConfig: childCfg, + }); err != nil { + b.logger.Warningf("Failed to push config to child policy: %v", err) + } +} + +// handleErrorFromUpdate handles errors from the parent LB policy and endpoint +// resolvers. fromParent is true if error is from the parent LB policy. In both +// cases, the error is propagated to the child policy, if one exists. 
+func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bool) { + b.logger.Warningf("Received error: %v", err) + + // A resource-not-found error from the parent LB policy means that the LDS + // or CDS resource was removed. This should result in endpoint resolvers + // being stopped here. + // + // A resource-not-found error from the EDS endpoint resolver means that the + // EDS resource was removed. No action needs to be taken for this, and we + // should continue watching the same EDS resource. + if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { + b.resourceWatcher.stop() + } + + if b.child != nil { + b.child.ResolverError(err) + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) +} + +// run is a long-running goroutine that handles updates from gRPC and endpoint +// resolvers. The methods handling the individual updates simply push them onto +// a channel which is read and acted upon from here. +func (b *clusterResolverBalancer) run() { + for { + select { + case u, ok := <-b.updateCh.Get(): + if !ok { + return + } + b.updateCh.Load() + switch update := u.(type) { + case *ccUpdate: + b.handleClientConnUpdate(update) + case *scUpdate: + // SubConn updates are simply handed over to the underlying + // child balancer. + if b.child == nil { + b.logger.Errorf("Received a SubConn update {%+v} with no child policy", update) + break + } + b.child.UpdateSubConnState(update.subConn, update.state) + case exitIdle: + if b.child == nil { + b.logger.Errorf("xds: received ExitIdle with no child balancer") + break + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. 
+ if ei, ok := b.child.(balancer.ExitIdler); ok { + ei.ExitIdle() + } + } + case u := <-b.resourceWatcher.updateChannel: + b.handleResourceUpdate(u) + + // Close results in stopping the endpoint resolvers and closing the + // underlying child policy and is the only way to exit this goroutine. + case <-b.closed.Done(): + b.resourceWatcher.stop() + + if b.child != nil { + b.child.Close() + b.child = nil + } + b.updateCh.Close() + // This is the *ONLY* point of return from this function. + b.logger.Infof("Shutdown") + b.done.Fire() + return + } + } +} + +// Following are methods to implement the balancer interface. + +func (b *clusterResolverBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if b.closed.HasFired() { + b.logger.Warningf("Received update from gRPC {%+v} after close", state) + return errBalancerClosed + } + + if b.xdsClient == nil { + c := xdsclient.FromResolverState(state.ResolverState) + if c == nil { + return balancer.ErrBadResolverState + } + b.xdsClient = c + b.attrsWithClient = state.ResolverState.Attributes + } + + b.updateCh.Put(&ccUpdate{state: state}) + return nil +} + +// ResolverError handles errors reported by the xdsResolver. +func (b *clusterResolverBalancer) ResolverError(err error) { + if b.closed.HasFired() { + b.logger.Warningf("Received resolver error {%v} after close", err) + return + } + b.updateCh.Put(&ccUpdate{err: err}) +} + +// UpdateSubConnState handles subConn updates from gRPC. +func (b *clusterResolverBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if b.closed.HasFired() { + b.logger.Warningf("Received subConn update {%v, %v} after close", sc, state) + return + } + b.updateCh.Put(&scUpdate{subConn: sc, state: state}) +} + +// Close closes the cdsBalancer and the underlying child balancer. 
+func (b *clusterResolverBalancer) Close() { + b.closed.Fire() + <-b.done.Done() +} + +func (b *clusterResolverBalancer) ExitIdle() { + b.updateCh.Put(exitIdle{}) +} + +// ccWrapper overrides ResolveNow(), so that re-resolution from the child +// policies will trigger the DNS resolver in cluster_resolver balancer. +type ccWrapper struct { + balancer.ClientConn + resourceWatcher *resourceResolver +} + +func (c *ccWrapper) ResolveNow(resolver.ResolveNowOptions) { + c.resourceWatcher.resolveNow() +} diff --git a/xds/internal/balancer/edsbalancer/util.go b/xds/internal/balancer/clusterresolver/clusterresolver_test.go similarity index 56% rename from xds/internal/balancer/edsbalancer/util.go rename to xds/internal/balancer/clusterresolver/clusterresolver_test.go index 132950426466..bdf6e60b35c6 100644 --- a/xds/internal/balancer/edsbalancer/util.go +++ b/xds/internal/balancer/clusterresolver/clusterresolver_test.go @@ -1,4 +1,5 @@ /* + * * Copyright 2019 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,33 +13,31 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
+ * */ -package edsbalancer +package clusterresolver import ( - "google.golang.org/grpc/internal/wrr" - xdsclient "google.golang.org/grpc/xds/internal/client" -) - -var newRandomWRR = wrr.NewRandom + "testing" + "time" -type dropper struct { - c xdsclient.OverloadDropConfig - w wrr.WRR -} + "google.golang.org/grpc/internal/grpctest" +) -func newDropper(c xdsclient.OverloadDropConfig) *dropper { - w := newRandomWRR() - w.Add(true, int64(c.Numerator)) - w.Add(false, int64(c.Denominator-c.Numerator)) +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond + testEDSService = "test-eds-service-name" + testClusterName = "test-cluster-name" + testClusterName2 = "google_cfe_some-name" + testBalancerNameFooBar = "foo.bar" +) - return &dropper{ - c: c, - w: w, - } +type s struct { + grpctest.Tester } -func (d *dropper) drop() (ret bool) { - return d.w.Next().(bool) +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) } diff --git a/xds/internal/balancer/clusterresolver/config.go b/xds/internal/balancer/clusterresolver/config.go new file mode 100644 index 000000000000..c67608819185 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/config.go @@ -0,0 +1,158 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package clusterresolver + +import ( + "bytes" + "encoding/json" + "fmt" + + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" +) + +// DiscoveryMechanismType is the type of discovery mechanism. +type DiscoveryMechanismType int + +const ( + // DiscoveryMechanismTypeEDS is eds. + DiscoveryMechanismTypeEDS DiscoveryMechanismType = iota // `json:"EDS"` + // DiscoveryMechanismTypeLogicalDNS is DNS. + DiscoveryMechanismTypeLogicalDNS // `json:"LOGICAL_DNS"` +) + +// MarshalJSON marshals a DiscoveryMechanismType to a quoted json string. +// +// This is necessary to handle enum (as strings) from JSON. +// +// Note that this needs to be defined on the type not pointer, otherwise the +// variables of this type will marshal to int not string. +func (t DiscoveryMechanismType) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBufferString(`"`) + switch t { + case DiscoveryMechanismTypeEDS: + buffer.WriteString("EDS") + case DiscoveryMechanismTypeLogicalDNS: + buffer.WriteString("LOGICAL_DNS") + } + buffer.WriteString(`"`) + return buffer.Bytes(), nil +} + +// UnmarshalJSON unmarshals a quoted json string to the DiscoveryMechanismType. +func (t *DiscoveryMechanismType) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + switch s { + case "EDS": + *t = DiscoveryMechanismTypeEDS + case "LOGICAL_DNS": + *t = DiscoveryMechanismTypeLogicalDNS + default: + return fmt.Errorf("unable to unmarshal string %q to type DiscoveryMechanismType", s) + } + return nil +} + +// DiscoveryMechanism is the discovery mechanism, can be either EDS or DNS. +// +// For DNS, the ClientConn target will be used for name resolution. +// +// For EDS, if EDSServiceName is not empty, it will be used for watching. 
If +// EDSServiceName is empty, Cluster will be used. +type DiscoveryMechanism struct { + // Cluster is the cluster name. + Cluster string `json:"cluster,omitempty"` + // LoadReportingServer is the LRS server to send load reports to. If not + // present, load reporting will be disabled. + LoadReportingServer *bootstrap.ServerConfig `json:"lrsLoadReportingServer,omitempty"` + // MaxConcurrentRequests is the maximum number of outstanding requests can + // be made to the upstream cluster. Default is 1024. + MaxConcurrentRequests *uint32 `json:"maxConcurrentRequests,omitempty"` + // Type is the discovery mechanism type. + Type DiscoveryMechanismType `json:"type,omitempty"` + // EDSServiceName is the EDS service name, as returned in CDS. May be unset + // if not specified in CDS. For type EDS only. + // + // This is used for EDS watch if set. If unset, Cluster is used for EDS + // watch. + EDSServiceName string `json:"edsServiceName,omitempty"` + // DNSHostname is the DNS name to resolve in "host:port" form. For type + // LOGICAL_DNS only. + DNSHostname string `json:"dnsHostname,omitempty"` + // OutlierDetection is the Outlier Detection LB configuration for this + // priority. + OutlierDetection json.RawMessage `json:"outlierDetection,omitempty"` + outlierDetection outlierdetection.LBConfig +} + +// Equal returns whether the DiscoveryMechanism is the same with the parameter. 
+func (dm DiscoveryMechanism) Equal(b DiscoveryMechanism) bool { + od := &dm.outlierDetection + switch { + case dm.Cluster != b.Cluster: + return false + case !equalUint32P(dm.MaxConcurrentRequests, b.MaxConcurrentRequests): + return false + case dm.Type != b.Type: + return false + case dm.EDSServiceName != b.EDSServiceName: + return false + case dm.DNSHostname != b.DNSHostname: + return false + case !od.EqualIgnoringChildPolicy(&b.outlierDetection): + return false + } + + if dm.LoadReportingServer == nil && b.LoadReportingServer == nil { + return true + } + if (dm.LoadReportingServer != nil) != (b.LoadReportingServer != nil) { + return false + } + return dm.LoadReportingServer.String() == b.LoadReportingServer.String() +} + +func equalUint32P(a, b *uint32) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + +// LBConfig is the config for cluster resolver balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // DiscoveryMechanisms is an ordered list of discovery mechanisms. + // + // Must have at least one element. Results from each discovery mechanism are + // concatenated together in successive priorities. + DiscoveryMechanisms []DiscoveryMechanism `json:"discoveryMechanisms,omitempty"` + + // XDSLBPolicy specifies the policy for locality picking and endpoint picking. + XDSLBPolicy json.RawMessage `json:"xdsLbPolicy,omitempty"` + xdsLBPolicy internalserviceconfig.BalancerConfig +} diff --git a/xds/internal/balancer/clusterresolver/config_test.go b/xds/internal/balancer/clusterresolver/config_test.go new file mode 100644 index 000000000000..608c17ef78c8 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/config_test.go @@ -0,0 +1,365 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "encoding/json" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/balancer" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" +) + +func TestDiscoveryMechanismTypeMarshalJSON(t *testing.T) { + tests := []struct { + name string + typ DiscoveryMechanismType + want string + }{ + { + name: "eds", + typ: DiscoveryMechanismTypeEDS, + want: `"EDS"`, + }, + { + name: "dns", + typ: DiscoveryMechanismTypeLogicalDNS, + want: `"LOGICAL_DNS"`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got, err := json.Marshal(tt.typ); err != nil || string(got) != tt.want { + t.Fatalf("DiscoveryMechanismTypeEDS.MarshalJSON() = (%v, %v), want (%s, nil)", string(got), err, tt.want) + } + }) + } +} +func TestDiscoveryMechanismTypeUnmarshalJSON(t *testing.T) { + tests := []struct { + name string + js string + want DiscoveryMechanismType + wantErr bool + }{ + { + name: "eds", + js: `"EDS"`, + want: DiscoveryMechanismTypeEDS, + }, + { + name: "dns", + js: `"LOGICAL_DNS"`, + want: DiscoveryMechanismTypeLogicalDNS, + }, + { + name: "error", + js: `"1234"`, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got DiscoveryMechanismType + err := 
json.Unmarshal([]byte(tt.js), &got) + if (err != nil) != tt.wantErr { + t.Fatalf("DiscoveryMechanismTypeEDS.UnmarshalJSON() error = %v, wantErr %v", err, tt.wantErr) + } + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Fatalf("DiscoveryMechanismTypeEDS.UnmarshalJSON() got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} + +const ( + testJSONConfig1 = `{ + "discoveryMechanisms": [{ + "cluster": "test-cluster-name", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, + "maxConcurrentRequests": 314, + "type": "EDS", + "edsServiceName": "test-eds-service-name", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"ROUND_ROBIN":{}}] +}` + testJSONConfig2 = `{ + "discoveryMechanisms": [{ + "cluster": "test-cluster-name", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, + "maxConcurrentRequests": 314, + "type": "EDS", + "edsServiceName": "test-eds-service-name", + "outlierDetection": {} + },{ + "type": "LOGICAL_DNS", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"ROUND_ROBIN":{}}] +}` + testJSONConfig3 = `{ + "discoveryMechanisms": [{ + "cluster": "test-cluster-name", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, + "maxConcurrentRequests": 314, + "type": "EDS", + "edsServiceName": "test-eds-service-name", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"ROUND_ROBIN":{}}] +}` + testJSONConfig4 = `{ + "discoveryMechanisms": [{ + "cluster": "test-cluster-name", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, + "maxConcurrentRequests": 314, + "type": "EDS", + "edsServiceName": "test-eds-service-name", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"ring_hash_experimental":{}}] +}` + 
testJSONConfig5 = `{ + "discoveryMechanisms": [{ + "cluster": "test-cluster-name", + "lrsLoadReportingServer": { + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }, + "maxConcurrentRequests": 314, + "type": "EDS", + "edsServiceName": "test-eds-service-name", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"ROUND_ROBIN":{}}] +}` +) + +var testLRSServerConfig = &bootstrap.ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: bootstrap.ChannelCreds{ + Type: "google_default", + }, +} + +func TestParseConfig(t *testing.T) { + tests := []struct { + name string + js string + want *LBConfig + wantErr bool + }{ + { + name: "empty json", + js: "", + want: nil, + wantErr: true, + }, + { + name: "OK with one discovery mechanism", + js: testJSONConfig1, + want: &LBConfig{ + DiscoveryMechanisms: []DiscoveryMechanism{ + { + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSService, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, + }, + }, + xdsLBPolicy: iserviceconfig.BalancerConfig{ // do we want to make this not pointer + Name: "ROUND_ROBIN", + Config: nil, + }, + }, + wantErr: false, + }, + { + name: "OK with multiple discovery mechanisms", + js: testJSONConfig2, + want: &LBConfig{ + DiscoveryMechanisms: []DiscoveryMechanism{ + { + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSService, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * 
time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, + }, + { + Type: DiscoveryMechanismTypeLogicalDNS, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, + }, + }, + xdsLBPolicy: iserviceconfig.BalancerConfig{ + Name: "ROUND_ROBIN", + Config: nil, + }, + }, + wantErr: false, + }, + { + name: "OK with picking policy round_robin", + js: testJSONConfig3, + want: &LBConfig{ + DiscoveryMechanisms: []DiscoveryMechanism{ + { + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSService, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, + }, + }, + xdsLBPolicy: iserviceconfig.BalancerConfig{ + Name: "ROUND_ROBIN", + Config: nil, + }, + }, + wantErr: false, + }, + { + name: "OK with picking policy ring_hash", + js: testJSONConfig4, + want: &LBConfig{ + DiscoveryMechanisms: []DiscoveryMechanism{ + { + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSService, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + 
MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, + }, + }, + xdsLBPolicy: iserviceconfig.BalancerConfig{ + Name: ringhash.Name, + Config: &ringhash.LBConfig{MinRingSize: 1024, MaxRingSize: 4096}, // Ringhash LB config with default min and max. + }, + }, + wantErr: false, + }, + { + name: "noop-outlier-detection", + js: testJSONConfig5, + want: &LBConfig{ + DiscoveryMechanisms: []DiscoveryMechanism{ + { + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSService, + outlierDetection: outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + // sre and fpe are both nil + }, + }, + }, + xdsLBPolicy: iserviceconfig.BalancerConfig{ + Name: "ROUND_ROBIN", + Config: nil, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + b := balancer.Get(Name) + if b == nil { + t.Fatalf("LB policy %q not registered", Name) + } + cfgParser, ok := b.(balancer.ConfigParser) + if !ok { + t.Fatalf("LB policy %q does not support config parsing", Name) + } + t.Run(tt.name, func(t *testing.T) { + got, err := cfgParser.ParseConfig([]byte(tt.js)) + if (err != nil) != tt.wantErr { + t.Fatalf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) + } + if tt.wantErr { + return + } + if diff := cmp.Diff(got, tt.want, cmp.AllowUnexported(LBConfig{}), cmpopts.IgnoreFields(LBConfig{}, "XDSLBPolicy")); diff != "" { + t.Errorf("parseConfig() got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} + +func newUint32(i uint32) *uint32 { + return &i +} diff --git a/xds/internal/balancer/clusterresolver/configbuilder.go b/xds/internal/balancer/clusterresolver/configbuilder.go new 
file mode 100644 index 000000000000..4b83dfb2bfa0 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/configbuilder.go @@ -0,0 +1,300 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "encoding/json" + "fmt" + "sort" + + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/hierarchy" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const million = 1000000 + +// priorityConfig is config for one priority. For example, if there an EDS and a +// DNS, the priority list will be [priorityConfig{EDS}, priorityConfig{DNS}]. +// +// Each priorityConfig corresponds to one discovery mechanism from the LBConfig +// generated by the CDS balancer. The CDS balancer resolves the cluster name to +// an ordered list of discovery mechanisms (if the top cluster is an aggregated +// cluster), one for each underlying cluster. 
+type priorityConfig struct { + mechanism DiscoveryMechanism + // edsResp is set only if type is EDS. + edsResp xdsresource.EndpointsUpdate + // addresses is set only if type is DNS. + addresses []string + // Each discovery mechanism has a name generator so that the child policies + // can reuse names between updates (EDS updates for example). + childNameGen *nameGenerator +} + +// buildPriorityConfigJSON builds balancer config for the passed in +// priorities. +// +// The built tree of balancers (see test for the output struct). +// +// ┌────────┐ +// │priority│ +// └┬──────┬┘ +// │ │ +// ┌──────────▼─┐ ┌─▼──────────┐ +// │cluster_impl│ │cluster_impl│ +// └──────┬─────┘ └─────┬──────┘ +// │ │ +// ┌──────▼─────┐ ┌─────▼──────┐ +// │xDSLBPolicy │ │xDSLBPolicy │ (Locality and Endpoint picking layer) +// └────────────┘ └────────────┘ +func buildPriorityConfigJSON(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]byte, []resolver.Address, error) { + pc, addrs, err := buildPriorityConfig(priorities, xdsLBPolicy) + if err != nil { + return nil, nil, fmt.Errorf("failed to build priority config: %v", err) + } + ret, err := json.Marshal(pc) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshal built priority config struct into json: %v", err) + } + return ret, addrs, nil +} + +func buildPriorityConfig(priorities []priorityConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*priority.LBConfig, []resolver.Address, error) { + var ( + retConfig = &priority.LBConfig{Children: make(map[string]*priority.Child)} + retAddrs []resolver.Address + ) + for _, p := range priorities { + switch p.mechanism.Type { + case DiscoveryMechanismTypeEDS: + names, configs, addrs, err := buildClusterImplConfigForEDS(p.childNameGen, p.edsResp, p.mechanism, xdsLBPolicy) + if err != nil { + return nil, nil, err + } + retConfig.Priorities = append(retConfig.Priorities, names...) + retAddrs = append(retAddrs, addrs...) 
+ var odCfgs map[string]*outlierdetection.LBConfig + if envconfig.XDSOutlierDetection { + odCfgs = convertClusterImplMapToOutlierDetection(configs, p.mechanism.outlierDetection) + for n, c := range odCfgs { + retConfig.Children[n] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: c}, + // Ignore all re-resolution from EDS children. + IgnoreReresolutionRequests: true, + } + } + continue + } + for n, c := range configs { + retConfig.Children[n] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: c}, + // Ignore all re-resolution from EDS children. + IgnoreReresolutionRequests: true, + } + + } + case DiscoveryMechanismTypeLogicalDNS: + name, config, addrs := buildClusterImplConfigForDNS(p.childNameGen, p.addresses, p.mechanism) + retConfig.Priorities = append(retConfig.Priorities, name) + retAddrs = append(retAddrs, addrs...) + var odCfg *outlierdetection.LBConfig + if envconfig.XDSOutlierDetection { + odCfg = makeClusterImplOutlierDetectionChild(config, p.mechanism.outlierDetection) + retConfig.Children[name] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: outlierdetection.Name, Config: odCfg}, + // Not ignore re-resolution from DNS children, they will trigger + // DNS to re-resolve. + IgnoreReresolutionRequests: false, + } + continue + } + retConfig.Children[name] = &priority.Child{ + Config: &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: config}, + // Not ignore re-resolution from DNS children, they will trigger + // DNS to re-resolve. 
+ IgnoreReresolutionRequests: false, + } + } + } + return retConfig, retAddrs, nil +} + +func convertClusterImplMapToOutlierDetection(ciCfgs map[string]*clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) map[string]*outlierdetection.LBConfig { + odCfgs := make(map[string]*outlierdetection.LBConfig, len(ciCfgs)) + for n, c := range ciCfgs { + odCfgs[n] = makeClusterImplOutlierDetectionChild(c, odCfg) + } + return odCfgs +} + +func makeClusterImplOutlierDetectionChild(ciCfg *clusterimpl.LBConfig, odCfg outlierdetection.LBConfig) *outlierdetection.LBConfig { + odCfgRet := odCfg + odCfgRet.ChildPolicy = &internalserviceconfig.BalancerConfig{Name: clusterimpl.Name, Config: ciCfg} + return &odCfgRet +} + +func buildClusterImplConfigForDNS(g *nameGenerator, addrStrs []string, mechanism DiscoveryMechanism) (string, *clusterimpl.LBConfig, []resolver.Address) { + // Endpoint picking policy for DNS is hardcoded to pick_first. + const childPolicy = "pick_first" + retAddrs := make([]resolver.Address, 0, len(addrStrs)) + pName := fmt.Sprintf("priority-%v", g.prefix) + for _, addrStr := range addrStrs { + retAddrs = append(retAddrs, hierarchy.Set(resolver.Address{Addr: addrStr}, []string{pName})) + } + return pName, &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + ChildPolicy: &internalserviceconfig.BalancerConfig{Name: childPolicy}, + }, retAddrs +} + +// buildClusterImplConfigForEDS returns a list of cluster_impl configs, one for +// each priority, sorted by priority, and the addresses for each priority (with +// hierarchy attributes set). 
+// +// For example, if there are two priorities, the returned values will be +// - ["p0", "p1"] +// - map{"p0":p0_config, "p1":p1_config} +// - [p0_address_0, p0_address_1, p1_address_0, p1_address_1] +// - p0 addresses' hierarchy attributes are set to p0 +func buildClusterImplConfigForEDS(g *nameGenerator, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) { + drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops)) + for _, d := range edsResp.Drops { + drops = append(drops, clusterimpl.DropConfig{ + Category: d.Category, + RequestsPerMillion: d.Numerator * million / d.Denominator, + }) + } + + priorities := groupLocalitiesByPriority(edsResp.Localities) + retNames := g.generate(priorities) + retConfigs := make(map[string]*clusterimpl.LBConfig, len(retNames)) + var retAddrs []resolver.Address + for i, pName := range retNames { + priorityLocalities := priorities[i] + cfg, addrs, err := priorityLocalitiesToClusterImpl(priorityLocalities, pName, mechanism, drops, xdsLBPolicy) + if err != nil { + return nil, nil, nil, err + } + retConfigs[pName] = cfg + retAddrs = append(retAddrs, addrs...) + } + return retNames, retConfigs, retAddrs, nil +} + +// groupLocalitiesByPriority returns the localities grouped by priority. +// +// The returned list is sorted from higher priority to lower. Each item in the +// list is a group of localities. 
+// +// For example, for L0-p0, L1-p0, L2-p1, results will be +// - [[L0, L1], [L2]] +func groupLocalitiesByPriority(localities []xdsresource.Locality) [][]xdsresource.Locality { + var priorityIntSlice []int + priorities := make(map[int][]xdsresource.Locality) + for _, locality := range localities { + priority := int(locality.Priority) + priorities[priority] = append(priorities[priority], locality) + priorityIntSlice = append(priorityIntSlice, priority) + } + // Sort the priorities based on the int value, deduplicate, and then turn + // the sorted list into a string list. This will be child names, in priority + // order. + sort.Ints(priorityIntSlice) + priorityIntSliceDeduped := dedupSortedIntSlice(priorityIntSlice) + ret := make([][]xdsresource.Locality, 0, len(priorityIntSliceDeduped)) + for _, p := range priorityIntSliceDeduped { + ret = append(ret, priorities[p]) + } + return ret +} + +func dedupSortedIntSlice(a []int) []int { + if len(a) == 0 { + return a + } + i, j := 0, 1 + for ; j < len(a); j++ { + if a[i] == a[j] { + continue + } + i++ + if i != j { + a[i] = a[j] + } + } + return a[:i+1] +} + +// priorityLocalitiesToClusterImpl takes a list of localities (with the same +// priority), and generates a cluster impl policy config, and a list of +// addresses with their path hierarchy set to [priority-name, locality-name], so +// priority and the xDS LB Policy know which child policy each address is for. 
+func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) { + var addrs []resolver.Address + for _, locality := range localities { + var lw uint32 = 1 + if locality.Weight != 0 { + lw = locality.Weight + } + localityStr, err := locality.ID.ToString() + if err != nil { + localityStr = fmt.Sprintf("%+v", locality.ID) + } + for _, endpoint := range locality.Endpoints { + // Filter out all "unhealthy" endpoints (unknown and healthy are + // both considered to be healthy: + // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). + if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown { + continue + } + addr := resolver.Address{Addr: endpoint.Address} + addr = hierarchy.Set(addr, []string{priorityName, localityStr}) + addr = internal.SetLocalityID(addr, locality.ID) + // "To provide the xds_wrr_locality load balancer information about + // locality weights received from EDS, the cluster resolver will + // populate a new locality weight attribute for each address The + // attribute will have the weight (as an integer) of the locality + // the address is part of." 
- A52 + addr = wrrlocality.SetAddrInfo(addr, wrrlocality.AddrInfo{LocalityWeight: lw}) + var ew uint32 = 1 + if endpoint.Weight != 0 { + ew = endpoint.Weight + } + addr = weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: lw * ew}) + addrs = append(addrs, addr) + } + } + return &clusterimpl.LBConfig{ + Cluster: mechanism.Cluster, + EDSServiceName: mechanism.EDSServiceName, + LoadReportingServer: mechanism.LoadReportingServer, + MaxConcurrentRequests: mechanism.MaxConcurrentRequests, + DropCategories: drops, + ChildPolicy: xdsLBPolicy, + }, addrs, nil +} diff --git a/xds/internal/balancer/clusterresolver/configbuilder_childname.go b/xds/internal/balancer/clusterresolver/configbuilder_childname.go new file mode 100644 index 000000000000..119f4c474752 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/configbuilder_childname.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package clusterresolver + +import ( + "fmt" + + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// nameGenerator generates a child name for a list of priorities (each priority +// is a list of localities). +// +// The purpose of this generator is to reuse names between updates. So the +// struct keeps state between generate() calls, and a later generate() might +// return names returned by the previous call. 
+type nameGenerator struct { + existingNames map[internal.LocalityID]string + prefix uint64 + nextID uint64 +} + +func newNameGenerator(prefix uint64) *nameGenerator { + return &nameGenerator{prefix: prefix} +} + +// generate returns a list of names for the given list of priorities. +// +// Each priority is a list of localities. The name for the priority is picked as +// - for each locality in this priority, if it exists in the existing names, +// this priority will reuse the name +// - if no reusable name is found for this priority, a new name is generated +// +// For example: +// - update 1: [[L1], [L2], [L3]] --> ["0", "1", "2"] +// - update 2: [[L1], [L2], [L3]] --> ["0", "1", "2"] +// - update 3: [[L1, L2], [L3]] --> ["0", "2"] (Two priorities were merged) +// - update 4: [[L1], [L4]] --> ["0", "3",] (A priority was split, and a new priority was added) +func (ng *nameGenerator) generate(priorities [][]xdsresource.Locality) []string { + var ret []string + usedNames := make(map[string]bool) + newNames := make(map[internal.LocalityID]string) + for _, priority := range priorities { + var nameFound string + for _, locality := range priority { + if name, ok := ng.existingNames[locality.ID]; ok { + if !usedNames[name] { + nameFound = name + // Found a name to use. No need to process the remaining + // localities. + break + } + } + } + + if nameFound == "" { + // No appropriate used name is found. Make a new name. + nameFound = fmt.Sprintf("priority-%d-%d", ng.prefix, ng.nextID) + ng.nextID++ + } + + ret = append(ret, nameFound) + // All localities in this priority share the same name. Add them all to + // the new map. 
+ for _, l := range priority { + newNames[l.ID] = nameFound + } + usedNames[nameFound] = true + } + ng.existingNames = newNames + return ret +} diff --git a/xds/internal/balancer/clusterresolver/configbuilder_childname_test.go b/xds/internal/balancer/clusterresolver/configbuilder_childname_test.go new file mode 100644 index 000000000000..6a3dbba83a4b --- /dev/null +++ b/xds/internal/balancer/clusterresolver/configbuilder_childname_test.go @@ -0,0 +1,111 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package clusterresolver + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +func Test_nameGenerator_generate(t *testing.T) { + tests := []struct { + name string + prefix uint64 + input1 [][]xdsresource.Locality + input2 [][]xdsresource.Locality + want []string + }{ + { + name: "init, two new priorities", + prefix: 3, + input1: nil, + input2: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L1"}}}, + }, + want: []string{"priority-3-0", "priority-3-1"}, + }, + { + name: "one new priority", + prefix: 1, + input1: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + }, + input2: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L1"}}}, + }, + want: []string{"priority-1-0", "priority-1-1"}, + }, + { + name: "merge two priorities", + prefix: 4, + input1: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L1"}}}, + {{ID: internal.LocalityID{Zone: "L2"}}}, + }, + input2: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}, {ID: internal.LocalityID{Zone: "L1"}}}, + {{ID: internal.LocalityID{Zone: "L2"}}}, + }, + want: []string{"priority-4-0", "priority-4-2"}, + }, + { + name: "swap two priorities", + input1: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L1"}}}, + {{ID: internal.LocalityID{Zone: "L2"}}}, + }, + input2: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L1"}}}, + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L2"}}}, + }, + want: []string{"priority-0-1", "priority-0-0", "priority-0-2"}, + }, + { + name: "split priority", + input1: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}, {ID: internal.LocalityID{Zone: "L1"}}}, + 
{{ID: internal.LocalityID{Zone: "L2"}}}, + }, + input2: [][]xdsresource.Locality{ + {{ID: internal.LocalityID{Zone: "L0"}}}, + {{ID: internal.LocalityID{Zone: "L1"}}}, // This gets a newly generated name, sice "0-0" was already picked. + {{ID: internal.LocalityID{Zone: "L2"}}}, + }, + want: []string{"priority-0-0", "priority-0-2", "priority-0-1"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ng := newNameGenerator(tt.prefix) + got1 := ng.generate(tt.input1) + t.Logf("%v", got1) + got := ng.generate(tt.input2) + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Errorf("generate() = got: %v, want: %v, diff (-got +want): %s", got, tt.want, diff) + } + }) + } +} diff --git a/xds/internal/balancer/clusterresolver/configbuilder_test.go b/xds/internal/balancer/clusterresolver/configbuilder_test.go new file mode 100644 index 000000000000..b30686b18561 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/configbuilder_test.go @@ -0,0 +1,706 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package clusterresolver + +import ( + "bytes" + "encoding/json" + "fmt" + "sort" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/hierarchy" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const ( + testLRSServer = "test-lrs-server" + testMaxRequests = 314 + testEDSServiceName = "service-name-from-parent" + testDropCategory = "test-drops" + testDropOverMillion = 1 + + localityCount = 5 + addressPerLocality = 2 +) + +var ( + testLocalityIDs []internal.LocalityID + testAddressStrs [][]string + testEndpoints [][]xdsresource.Endpoint + + testLocalitiesP0, testLocalitiesP1 []xdsresource.Locality + + addrCmpOpts = cmp.Options{ + cmp.AllowUnexported(attributes.Attributes{}), + cmp.Transformer("SortAddrs", func(in []resolver.Address) []resolver.Address { + out := append([]resolver.Address(nil), in...) 
// Copy input to avoid mutating it + sort.Slice(out, func(i, j int) bool { + return out[i].Addr < out[j].Addr + }) + return out + }), + } + + noopODCfg = outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + } +) + +func init() { + for i := 0; i < localityCount; i++ { + testLocalityIDs = append(testLocalityIDs, internal.LocalityID{Zone: fmt.Sprintf("test-zone-%d", i)}) + var ( + addrs []string + ends []xdsresource.Endpoint + ) + for j := 0; j < addressPerLocality; j++ { + addr := fmt.Sprintf("addr-%d-%d", i, j) + addrs = append(addrs, addr) + ends = append(ends, xdsresource.Endpoint{ + Address: addr, + HealthStatus: xdsresource.EndpointHealthStatusHealthy, + }) + } + testAddressStrs = append(testAddressStrs, addrs) + testEndpoints = append(testEndpoints, ends) + } + + testLocalitiesP0 = []xdsresource.Locality{ + { + Endpoints: testEndpoints[0], + ID: testLocalityIDs[0], + Weight: 20, + Priority: 0, + }, + { + Endpoints: testEndpoints[1], + ID: testLocalityIDs[1], + Weight: 80, + Priority: 0, + }, + } + testLocalitiesP1 = []xdsresource.Locality{ + { + Endpoints: testEndpoints[2], + ID: testLocalityIDs[2], + Weight: 20, + Priority: 1, + }, + { + Endpoints: testEndpoints[3], + ID: testLocalityIDs[3], + Weight: 80, + Priority: 1, + }, + } +} + +// TestBuildPriorityConfigJSON is a sanity check that the built balancer config +// can be parsed. The behavior test is covered by TestBuildPriorityConfig. 
+func TestBuildPriorityConfigJSON(t *testing.T) { + gotConfig, _, err := buildPriorityConfigJSON([]priorityConfig{ + { + mechanism: DiscoveryMechanism{ + Cluster: testClusterName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServiceName, + }, + edsResp: xdsresource.EndpointsUpdate{ + Drops: []xdsresource.OverloadDropConfig{ + { + Category: testDropCategory, + Numerator: testDropOverMillion, + Denominator: million, + }, + }, + Localities: []xdsresource.Locality{ + testLocalitiesP0[0], + testLocalitiesP0[1], + testLocalitiesP1[0], + testLocalitiesP1[1], + }, + }, + childNameGen: newNameGenerator(0), + }, + { + mechanism: DiscoveryMechanism{ + Type: DiscoveryMechanismTypeLogicalDNS, + }, + addresses: testAddressStrs[4], + childNameGen: newNameGenerator(1), + }, + }, nil) + if err != nil { + t.Fatalf("buildPriorityConfigJSON(...) failed: %v", err) + } + + var prettyGot bytes.Buffer + if err := json.Indent(&prettyGot, gotConfig, ">>> ", " "); err != nil { + t.Fatalf("json.Indent() failed: %v", err) + } + // Print the indented json if this test fails. + t.Log(prettyGot.String()) + + priorityB := balancer.Get(priority.Name) + if _, err = priorityB.(balancer.ConfigParser).ParseConfig(gotConfig); err != nil { + t.Fatalf("ParseConfig(%+v) failed: %v", gotConfig, err) + } +} + +// TestBuildPriorityConfig tests the priority config generation. Each top level +// balancer per priority should be an Outlier Detection balancer, with a Cluster +// Impl Balancer as a child. +func TestBuildPriorityConfig(t *testing.T) { + gotConfig, _, _ := buildPriorityConfig([]priorityConfig{ + { + // EDS - OD config should be the top level for both of the EDS + // priorities balancer This EDS priority will have multiple sub + // priorities. The Outlier Detection configuration specified in the + // Discovery Mechanism should be the top level for each sub + // priorities balancer. 
+ mechanism: DiscoveryMechanism{ + Cluster: testClusterName, + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServiceName, + outlierDetection: noopODCfg, + }, + edsResp: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + testLocalitiesP0[0], + testLocalitiesP0[1], + testLocalitiesP1[0], + testLocalitiesP1[1], + }, + }, + childNameGen: newNameGenerator(0), + }, + { + // This OD config should wrap the Logical DNS priorities balancer. + mechanism: DiscoveryMechanism{ + Cluster: testClusterName2, + Type: DiscoveryMechanismTypeLogicalDNS, + outlierDetection: noopODCfg, + }, + addresses: testAddressStrs[4], + childNameGen: newNameGenerator(1), + }, + }, nil) + + wantConfig := &priority.LBConfig{ + Children: map[string]*priority.Child{ + "priority-0-0": { + Config: &iserviceconfig.BalancerConfig{ + Name: outlierdetection.Name, + Config: &outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + DropCategories: []clusterimpl.DropConfig{}, + }, + }, + }, + }, + IgnoreReresolutionRequests: true, + }, + "priority-0-1": { + Config: &iserviceconfig.BalancerConfig{ + Name: outlierdetection.Name, + Config: &outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + DropCategories: []clusterimpl.DropConfig{}, + }, + }, + }, + 
}, + IgnoreReresolutionRequests: true, + }, + "priority-1": { + Config: &iserviceconfig.BalancerConfig{ + Name: outlierdetection.Name, + Config: &outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: testClusterName2, + ChildPolicy: &iserviceconfig.BalancerConfig{Name: "pick_first"}, + }, + }, + }, + }, + IgnoreReresolutionRequests: false, + }, + }, + Priorities: []string{"priority-0-0", "priority-0-1", "priority-1"}, + } + if diff := cmp.Diff(gotConfig, wantConfig); diff != "" { + t.Errorf("buildPriorityConfig() diff (-got +want) %v", diff) + } +} + +func TestBuildClusterImplConfigForDNS(t *testing.T) { + gotName, gotConfig, gotAddrs := buildClusterImplConfigForDNS(newNameGenerator(3), testAddressStrs[0], DiscoveryMechanism{Cluster: testClusterName2, Type: DiscoveryMechanismTypeLogicalDNS}) + wantName := "priority-3" + wantConfig := &clusterimpl.LBConfig{ + Cluster: testClusterName2, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "pick_first", + }, + } + wantAddrs := []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][0]}, []string{"priority-3"}), + hierarchy.Set(resolver.Address{Addr: testAddressStrs[0][1]}, []string{"priority-3"}), + } + + if diff := cmp.Diff(gotName, wantName); diff != "" { + t.Errorf("buildClusterImplConfigForDNS() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(gotConfig, wantConfig); diff != "" { + t.Errorf("buildClusterImplConfigForDNS() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(gotAddrs, wantAddrs, addrCmpOpts); diff != "" { + t.Errorf("buildClusterImplConfigForDNS() diff (-got +want) %v", diff) + } +} + +func TestBuildClusterImplConfigForEDS(t *testing.T) { + gotNames, 
gotConfigs, gotAddrs, _ := buildClusterImplConfigForEDS( + newNameGenerator(2), + xdsresource.EndpointsUpdate{ + Drops: []xdsresource.OverloadDropConfig{ + { + Category: testDropCategory, + Numerator: testDropOverMillion, + Denominator: million, + }, + }, + Localities: []xdsresource.Locality{ + { + Endpoints: testEndpoints[3], + ID: testLocalityIDs[3], + Weight: 80, + Priority: 1, + }, { + Endpoints: testEndpoints[1], + ID: testLocalityIDs[1], + Weight: 80, + Priority: 0, + }, { + Endpoints: testEndpoints[2], + ID: testLocalityIDs[2], + Weight: 20, + Priority: 1, + }, { + Endpoints: testEndpoints[0], + ID: testLocalityIDs[0], + Weight: 20, + Priority: 0, + }, + }, + }, + DiscoveryMechanism{ + Cluster: testClusterName, + MaxConcurrentRequests: newUint32(testMaxRequests), + LoadReportingServer: testLRSServerConfig, + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSServiceName, + }, + nil, + ) + + wantNames := []string{ + fmt.Sprintf("priority-%v-%v", 2, 0), + fmt.Sprintf("priority-%v-%v", 2, 1), + } + wantConfigs := map[string]*clusterimpl.LBConfig{ + "priority-2-0": { + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + DropCategories: []clusterimpl.DropConfig{ + { + Category: testDropCategory, + RequestsPerMillion: testDropOverMillion, + }, + }, + }, + "priority-2-1": { + Cluster: testClusterName, + EDSServiceName: testEDSServiceName, + LoadReportingServer: testLRSServerConfig, + MaxConcurrentRequests: newUint32(testMaxRequests), + DropCategories: []clusterimpl.DropConfig{ + { + Category: testDropCategory, + RequestsPerMillion: testDropOverMillion, + }, + }, + }, + } + wantAddrs := []resolver.Address{ + testAddrWithAttrs(testAddressStrs[0][0], 20, 1, "priority-2-0", &testLocalityIDs[0]), + testAddrWithAttrs(testAddressStrs[0][1], 20, 1, "priority-2-0", &testLocalityIDs[0]), + testAddrWithAttrs(testAddressStrs[1][0], 80, 1, "priority-2-0", 
&testLocalityIDs[1]), + testAddrWithAttrs(testAddressStrs[1][1], 80, 1, "priority-2-0", &testLocalityIDs[1]), + testAddrWithAttrs(testAddressStrs[2][0], 20, 1, "priority-2-1", &testLocalityIDs[2]), + testAddrWithAttrs(testAddressStrs[2][1], 20, 1, "priority-2-1", &testLocalityIDs[2]), + testAddrWithAttrs(testAddressStrs[3][0], 80, 1, "priority-2-1", &testLocalityIDs[3]), + testAddrWithAttrs(testAddressStrs[3][1], 80, 1, "priority-2-1", &testLocalityIDs[3]), + } + + if diff := cmp.Diff(gotNames, wantNames); diff != "" { + t.Errorf("buildClusterImplConfigForEDS() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(gotConfigs, wantConfigs); diff != "" { + t.Errorf("buildClusterImplConfigForEDS() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(gotAddrs, wantAddrs, addrCmpOpts); diff != "" { + t.Errorf("buildClusterImplConfigForEDS() diff (-got +want) %v", diff) + } + +} + +func TestGroupLocalitiesByPriority(t *testing.T) { + tests := []struct { + name string + localities []xdsresource.Locality + wantLocalities [][]xdsresource.Locality + }{ + { + name: "1 locality 1 priority", + localities: []xdsresource.Locality{testLocalitiesP0[0]}, + wantLocalities: [][]xdsresource.Locality{ + {testLocalitiesP0[0]}, + }, + }, + { + name: "2 locality 1 priority", + localities: []xdsresource.Locality{testLocalitiesP0[0], testLocalitiesP0[1]}, + wantLocalities: [][]xdsresource.Locality{ + {testLocalitiesP0[0], testLocalitiesP0[1]}, + }, + }, + { + name: "1 locality in each", + localities: []xdsresource.Locality{testLocalitiesP0[0], testLocalitiesP1[0]}, + wantLocalities: [][]xdsresource.Locality{ + {testLocalitiesP0[0]}, + {testLocalitiesP1[0]}, + }, + }, + { + name: "2 localities in each sorted", + localities: []xdsresource.Locality{ + testLocalitiesP0[0], testLocalitiesP0[1], + testLocalitiesP1[0], testLocalitiesP1[1]}, + wantLocalities: [][]xdsresource.Locality{ + {testLocalitiesP0[0], testLocalitiesP0[1]}, + {testLocalitiesP1[0], testLocalitiesP1[1]}, + }, + }, + { + // 
The localities are given in order [p1, p0, p1, p0], but the + // returned priority list must be sorted [p0, p1], because the list + // order is the priority order. + name: "2 localities in each needs to sort", + localities: []xdsresource.Locality{ + testLocalitiesP1[1], testLocalitiesP0[1], + testLocalitiesP1[0], testLocalitiesP0[0]}, + wantLocalities: [][]xdsresource.Locality{ + {testLocalitiesP0[1], testLocalitiesP0[0]}, + {testLocalitiesP1[1], testLocalitiesP1[0]}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotLocalities := groupLocalitiesByPriority(tt.localities) + if diff := cmp.Diff(gotLocalities, tt.wantLocalities); diff != "" { + t.Errorf("groupLocalitiesByPriority() diff(-got +want) %v", diff) + } + }) + } +} + +func TestDedupSortedIntSlice(t *testing.T) { + tests := []struct { + name string + a []int + want []int + }{ + { + name: "empty", + a: []int{}, + want: []int{}, + }, + { + name: "no dup", + a: []int{0, 1, 2, 3}, + want: []int{0, 1, 2, 3}, + }, + { + name: "with dup", + a: []int{0, 0, 1, 1, 1, 2, 3}, + want: []int{0, 1, 2, 3}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := dedupSortedIntSlice(tt.a); !cmp.Equal(got, tt.want) { + t.Errorf("dedupSortedIntSlice() = %v, want %v, diff %v", got, tt.want, cmp.Diff(got, tt.want)) + } + }) + } +} + +func TestPriorityLocalitiesToClusterImpl(t *testing.T) { + tests := []struct { + name string + localities []xdsresource.Locality + priorityName string + mechanism DiscoveryMechanism + childPolicy *iserviceconfig.BalancerConfig + wantConfig *clusterimpl.LBConfig + wantAddrs []resolver.Address + wantErr bool + }{{ + name: "round robin as child, no LRS", + localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: 
internal.LocalityID{Zone: "test-zone-1"}, + Weight: 20, + }, + { + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-2"}, + Weight: 80, + }, + }, + priorityName: "test-priority", + childPolicy: &iserviceconfig.BalancerConfig{Name: roundrobin.Name}, + mechanism: DiscoveryMechanism{ + Cluster: testClusterName, + Type: DiscoveryMechanismTypeEDS, + EDSServiceName: testEDSService, + }, + // lrsServer is nil, so LRS policy will not be used. + wantConfig: &clusterimpl.LBConfig{ + Cluster: testClusterName, + EDSServiceName: testEDSService, + ChildPolicy: &iserviceconfig.BalancerConfig{Name: roundrobin.Name}, + }, + wantAddrs: []resolver.Address{ + testAddrWithAttrs("addr-1-1", 20, 90, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", 20, 10, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", 80, 90, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", 80, 10, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + }, + }, + { + name: "ring_hash as child", + localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-1-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-1-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-1"}, + Weight: 20, + }, + { + Endpoints: []xdsresource.Endpoint{ + {Address: "addr-2-1", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 90}, + {Address: "addr-2-2", HealthStatus: xdsresource.EndpointHealthStatusHealthy, Weight: 10}, + }, + ID: internal.LocalityID{Zone: "test-zone-2"}, + Weight: 80, + }, + }, + priorityName: "test-priority", + childPolicy: 
&iserviceconfig.BalancerConfig{Name: ringhash.Name, Config: &ringhash.LBConfig{MinRingSize: 1, MaxRingSize: 2}}, + // lrsServer is nil, so LRS policy will not be used. + wantConfig: &clusterimpl.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: ringhash.Name, + Config: &ringhash.LBConfig{MinRingSize: 1, MaxRingSize: 2}, + }, + }, + wantAddrs: []resolver.Address{ + testAddrWithAttrs("addr-1-1", 20, 90, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-1-2", 20, 10, "test-priority", &internal.LocalityID{Zone: "test-zone-1"}), + testAddrWithAttrs("addr-2-1", 80, 90, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + testAddrWithAttrs("addr-2-2", 80, 10, "test-priority", &internal.LocalityID{Zone: "test-zone-2"}), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := priorityLocalitiesToClusterImpl(tt.localities, tt.priorityName, tt.mechanism, nil, tt.childPolicy) + if (err != nil) != tt.wantErr { + t.Fatalf("priorityLocalitiesToClusterImpl() error = %v, wantErr %v", err, tt.wantErr) + } + if diff := cmp.Diff(got, tt.wantConfig); diff != "" { + t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) + } + if diff := cmp.Diff(got1, tt.wantAddrs, cmp.AllowUnexported(attributes.Attributes{})); diff != "" { + t.Errorf("localitiesToWeightedTarget() diff (-got +want) %v", diff) + } + }) + } +} + +func assertString(f func() (string, error)) string { + s, err := f() + if err != nil { + panic(err.Error()) + } + return s +} + +func testAddrWithAttrs(addrStr string, localityWeight, endpointWeight uint32, priority string, lID *internal.LocalityID) resolver.Address { + addr := resolver.Address{Addr: addrStr} + path := []string{priority} + if lID != nil { + path = append(path, assertString(lID.ToString)) + addr = internal.SetLocalityID(addr, *lID) + } + addr = hierarchy.Set(addr, path) + addr = wrrlocality.SetAddrInfo(addr, wrrlocality.AddrInfo{LocalityWeight: 
localityWeight}) + addr = weightedroundrobin.SetAddrInfo(addr, weightedroundrobin.AddrInfo{Weight: localityWeight * endpointWeight}) + return addr +} + +func TestConvertClusterImplMapToOutlierDetection(t *testing.T) { + tests := []struct { + name string + ciCfgsMap map[string]*clusterimpl.LBConfig + odCfg outlierdetection.LBConfig + wantODCfgs map[string]*outlierdetection.LBConfig + }{ + { + name: "single-entry-noop", + ciCfgsMap: map[string]*clusterimpl.LBConfig{ + "child1": { + Cluster: "cluster1", + }, + }, + odCfg: outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + }, + wantODCfgs: map[string]*outlierdetection.LBConfig{ + "child1": { + Interval: 1<<63 - 1, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: "cluster1", + }, + }, + }, + }, + }, + { + name: "multiple-entries-noop", + ciCfgsMap: map[string]*clusterimpl.LBConfig{ + "child1": { + Cluster: "cluster1", + }, + "child2": { + Cluster: "cluster2", + }, + }, + odCfg: outlierdetection.LBConfig{ + Interval: 1<<63 - 1, + }, + wantODCfgs: map[string]*outlierdetection.LBConfig{ + "child1": { + Interval: 1<<63 - 1, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: "cluster1", + }, + }, + }, + "child2": { + Interval: 1<<63 - 1, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: "cluster2", + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := convertClusterImplMapToOutlierDetection(test.ciCfgsMap, test.odCfg) + if diff := cmp.Diff(got, test.wantODCfgs); diff != "" { + t.Fatalf("convertClusterImplMapToOutlierDetection() diff(-got +want) %v", diff) + } + }) + } +} diff --git a/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go b/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go new file mode 100644 index 
000000000000..b4740eea6d0b --- /dev/null +++ b/xds/internal/balancer/clusterresolver/e2e_test/aggregate_cluster_test.go @@ -0,0 +1,1067 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2e_test + +import ( + "context" + "fmt" + "sort" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3aggregateclusterpb 
"github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +// makeAggregateClusterResource returns an aggregate cluster resource with the +// given name and list of child names. +func makeAggregateClusterResource(name string, childNames []string) *v3clusterpb.Cluster { + return &v3clusterpb.Cluster{ + Name: name, + ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ + Clusters: childNames, + }), + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + } +} + +// makeLogicalDNSClusterResource returns a LOGICAL_DNS cluster resource with the +// given name and given DNS host and port. +func makeLogicalDNSClusterResource(name, dnsHost string, dnsPort uint32) *v3clusterpb.Cluster { + return &v3clusterpb.Cluster{ + Name: name, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LoadAssignment: &v3endpointpb.ClusterLoadAssignment{ + Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ + LbEndpoints: []*v3endpointpb.LbEndpoint{{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: dnsHost, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: dnsPort, + }, + }, + }, + }, + }, + }, + }}, + }}, + }, + } +} + +// setupDNS unregisters the DNS resolver and registers a manual resolver for the +// same scheme. This allows the test to mock the DNS resolution by supplying the +// addresses of the test backends. 
+// +// Returns the following: +// - a channel onto which the DNS target being resolved is written to by the +// mock DNS resolver +// - a channel to notify close of the DNS resolver +// - a channel to notify re-resolution requests to the DNS resolver +// - a manual resolver which is used to mock the actual DNS resolution +// - a cleanup function which re-registers the original DNS resolver +func setupDNS() (chan resolver.Target, chan struct{}, chan resolver.ResolveNowOptions, *manual.Resolver, func()) { + targetCh := make(chan resolver.Target, 1) + closeCh := make(chan struct{}, 1) + resolveNowCh := make(chan resolver.ResolveNowOptions, 1) + + mr := manual.NewBuilderWithScheme("dns") + mr.BuildCallback = func(target resolver.Target, _ resolver.ClientConn, _ resolver.BuildOptions) { targetCh <- target } + mr.CloseCallback = func() { closeCh <- struct{}{} } + mr.ResolveNowCallback = func(opts resolver.ResolveNowOptions) { resolveNowCh <- opts } + + dnsResolverBuilder := resolver.Get("dns") + resolver.UnregisterForTesting("dns") + resolver.Register(mr) + + return targetCh, closeCh, resolveNowCh, mr, func() { resolver.Register(dnsResolverBuilder) } +} + +// TestAggregateCluster_WithTwoEDSClusters tests the case where the top-level +// cluster resource is an aggregate cluster. It verifies that RPCs fail when the +// management server has not responded to all requested EDS resources, and also +// that RPCs are routed to the highest priority cluster once all requested EDS +// resources have been sent by the management server. +func (s) TestAggregateCluster_WithTwoEDSClusters(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start an xDS management server that pushes the EDS resource names onto a + // channel when requested. 
+ edsResourceNameCh := make(chan []string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) == 0 { + // This is the case for ACKs. Do nothing here. + return nil + } + select { + case edsResourceNameCh <- req.GetResourceNames(): + case <-ctx.Done(): + } + return nil + }, + AllowResourceSubset: true, + }) + defer cleanup() + + // Start two test backends and extract their host and port. The first + // backend belongs to EDS cluster "cluster-1", while the second backend + // belongs to EDS cluster "cluster-2". + servers, cleanup2 := startTestServiceBackends(t, 2) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster, two EDS clusters and only one endpoints + // resource (corresponding to the first EDS cluster) in the management + // server. + const clusterName1 = clusterName + "-cluster-1" + const clusterName2 = clusterName + "-cluster-2" + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{clusterName1, clusterName2}), + e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), + e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(clusterName1, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Wait for both EDS resources to be requested. 
+ func() { + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + select { + case names := <-edsResourceNameCh: + // Copy and sort the sortedNames to avoid racing with an + // OnStreamRequest call. + sortedNames := make([]string, len(names)) + copy(sortedNames, names) + sort.Strings(sortedNames) + if cmp.Equal(sortedNames, []string{clusterName1, clusterName2}) { + return + } + default: + } + } + }() + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for all EDS resources %v to be requested", []string{clusterName1, clusterName2}) + } + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the management server has not responded with all EDS resources + // requested. + client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Update the management server with the second EDS resource. + resources.Endpoints = append(resources.Endpoints, e2e.DefaultEndpoint(clusterName2, "localhost", []uint32{uint32(ports[1])})) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Make an RPC and ensure that it gets routed to cluster-1, implicitly + // higher priority than cluster-2. + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } +} + +// TestAggregateCluster_WithTwoEDSClusters_PrioritiesChange tests the case where +// the top-level cluster resource is an aggregate cluster. It verifies that RPCs +// are routed to the highest priority EDS cluster. 
+func (s) TestAggregateCluster_WithTwoEDSClusters_PrioritiesChange(t *testing.T) { + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup() + + // Start two test backends and extract their host and port. The first + // backend belongs to EDS cluster "cluster-1", while the second backend + // belongs to EDS cluster "cluster-2". + servers, cleanup2 := startTestServiceBackends(t, 2) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster, two EDS clusters and the corresponding + // endpoints resources in the management server. + const clusterName1 = clusterName + "cluster-1" + const clusterName2 = clusterName + "cluster-2" + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{clusterName1, clusterName2}), + e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), + e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{ + e2e.DefaultEndpoint(clusterName1, "localhost", []uint32{uint32(ports[0])}), + e2e.DefaultEndpoint(clusterName2, "localhost", []uint32{uint32(ports[1])}), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Make an RPC and ensure that it gets routed to cluster-1, implicitly + // higher priority than cluster-2. 
+ client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Swap the priorities of the EDS clusters in the aggregate cluster. + resources.Clusters = []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{clusterName2, clusterName1}), + e2e.DefaultCluster(clusterName1, "", e2e.SecurityLevelNone), + e2e.DefaultCluster(clusterName2, "", e2e.SecurityLevelNone), + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for RPCs to get routed to cluster-2, which is now implicitly higher + // priority than cluster-1, after the priority switch above. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() == addrs[1].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatal("Timeout waiting for RPCs to be routed to cluster-2 after priority switch") + } +} + +// TestAggregateCluster_WithOneDNSCluster tests the case where the top-level +// cluster resource is an aggregate cluster that resolves to a single +// LOGICAL_DNS cluster. The test verifies that RPCs can be made to backends that +// make up the LOGICAL_DNS cluster. +func (s) TestAggregateCluster_WithOneDNSCluster(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends. 
+ servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, _ := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to a single LOGICAL_DNS cluster. + const ( + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{dnsClusterName}), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs}) + + // Make an RPC and ensure that it gets routed to the first backend since the + // child policy for a LOGICAL_DNS cluster is pick_first by default. 
+ client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } +} + +// TestAggregateCluster_WithEDSAndDNS tests the case where the top-level cluster +// resource is an aggregate cluster that resolves to an EDS and a LOGICAL_DNS +// cluster. The test verifies that RPCs fail until both clusters are resolved to +// endpoints, and RPCs are routed to the higher priority EDS cluster. +func (s) TestAggregateCluster_WithEDSAndDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server that pushes the name of the requested EDS + // resource onto a channel. + edsResourceCh := make(chan string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) > 0 { + select { + case edsResourceCh <- req.GetResourceNames()[0]: + default: + } + } + return nil + }, + AllowResourceSubset: true, + }) + defer cleanup2() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS cluster and the second backend is used for + // the LOGICAL_DNS cluster. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to an EDS and DNS cluster. Also + // configure an endpoints resource for the EDS cluster. 
+ const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsClusterName, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that an EDS request is sent for the expected resource name. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS request to be received on the management server") + case name := <-edsResourceCh: + if name != edsClusterName { + t.Fatalf("Received EDS request with resource name %q, want %q", name, edsClusterName) + } + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the DNS resolver has not responded with endpoint addresses. 
+ client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) + + // Make an RPC and ensure that it gets routed to the first backend since the + // EDS cluster is of higher priority than the LOGICAL_DNS cluster. + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } +} + +// TestAggregateCluster_SwitchEDSAndDNS tests the case where the top-level +// cluster resource is an aggregate cluster. It initially resolves to a single +// EDS cluster. The test verifies that RPCs are routed to backends in the EDS +// cluster. Subsequently, the aggregate cluster resolves to a single DNS +// cluster. The test verifies that RPCs are successful, this time to backends in +// the DNS cluster. +func (s) TestAggregateCluster_SwitchEDSAndDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS cluster and the second backend is used for + // the LOGICAL_DNS cluster. 
+ servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to a single EDS cluster. Also, + // configure the underlying EDS cluster (and the corresponding endpoints + // resource) and DNS cluster (will be used later in the test). + const ( + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsServiceName}), + e2e.DefaultCluster(edsServiceName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that the RPC is routed to the appropriate backend. + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Update the aggregate cluster to point to a single DNS cluster. 
+ resources.Clusters = []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{dnsClusterName}), + e2e.DefaultCluster(edsServiceName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) + + // Ensure that start getting routed to the backend corresponding to the + // LOGICAL_DNS cluster. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)) + if peer.Addr.String() == addrs[1].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for RPCs to be routed to backend %q in the DNS cluster", addrs[1].Addr) + } +} + +// TestAggregateCluster_BadEDS_GoodToBadDNS tests the case where the top-level +// cluster is an aggregate cluster that resolves to an EDS and LOGICAL_DNS +// cluster. The test first asserts that no RPCs can be made after receiving an +// EDS response with zero endpoints because no update has been received from the +// DNS resolver yet. Once the DNS resolver pushes an update, the test verifies +// that we switch to the DNS cluster and can make a successful RPC. At this +// point when the DNS cluster returns an error, the test verifies that RPCs are +// still successful. This is the expected behavior because pick_first (the leaf +// policy) ignores resolver errors when it is not in TransientFailure. 
+func (s) TestAggregateCluster_BadEDS_GoodToBadDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, _ := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to an EDS and LOGICAL_DNS + // cluster. Also configure an empty endpoints resource for the EDS cluster + // that contains no endpoints. + const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + emptyEndpointResource := e2e.DefaultEndpoint(edsServiceName, "localhost", nil) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, edsServiceName, e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{emptyEndpointResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the EDS resource came back with no endpoints, and we are yet to + // push an update through the DNS resolver. 
+ client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs}) + + // Ensure that RPCs start getting routed to the first backend since the + // child policy for a LOGICAL_DNS cluster is pick_first by default. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Logf("EmptyCall() failed: %v", err) + continue + } + if peer.Addr.String() == addrs[0].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for RPCs to be routed to backend %q in the DNS cluster", addrs[0].Addr) + } + + // Push an error from the DNS resolver as well. + dnsErr := fmt.Errorf("DNS error") + dnsR.ReportError(dnsErr) + + // Ensure that RPCs continue to succeed for the next second. 
+ for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + } +} + +// TestAggregateCluster_BadEDS_BadDNS tests the case where the top-level cluster +// is an aggregate cluster that resolves to an EDS and LOGICAL_DNS cluster. When +// the EDS request returns a resource that contains no endpoints, the test +// verifies that we switch to the DNS cluster. When the DNS cluster returns an +// error, the test verifies that RPCs fail with the error returned by the DNS +// resolver, and thus, ensures that pick_first (the leaf policy) does not ignore +// resolver errors. +func (s) TestAggregateCluster_BadEDS_BadDNS(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + managementServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Configure an aggregate cluster pointing to an EDS and LOGICAL_DNS + // cluster. Also configure an empty endpoints resource for the EDS cluster + // that contains no endpoints. 
+ const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + emptyEndpointResource := e2e.DefaultEndpoint(edsServiceName, "localhost", nil) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, edsServiceName, e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{emptyEndpointResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Make an RPC with a short deadline. We expect this RPC to not succeed + // because the EDS resource came back with no endpoints, and we are yet to + // push an update through the DNS resolver. + client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Push an error from the DNS resolver as well. 
+ dnsErr := fmt.Errorf("DNS error") + dnsR.ReportError(dnsErr) + + // Ensure that the error returned from the DNS resolver is reported to the + // caller of the RPC. + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if code := status.Code(err); code != codes.Unavailable { + t.Fatalf("EmptyCall() failed with code %s, want %s", code, codes.Unavailable) + } + if err == nil || !strings.Contains(err.Error(), dnsErr.Error()) { + t.Fatalf("EmptyCall() failed with error %v, want %v", err, dnsErr) + } +} + +// TestAggregateCluster_NoFallback_EDSNackedWithPreviousGoodUpdate tests the +// scenario where the top-level cluster is an aggregate cluster that resolves to +// an EDS and LOGICAL_DNS cluster. The management server first sends a good EDS +// response for the EDS cluster and the test verifies that RPCs get routed to +// the EDS cluster. The management server then sends a bad EDS response. The +// test verifies that the cluster_resolver LB policy continues to use the +// previously received good update and that RPCs still get routed to the EDS +// cluster. +func (s) TestAggregateCluster_NoFallback_EDSNackedWithPreviousGoodUpdate(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + mgmtServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS cluster and the second backend is used for + // the LOGICAL_DNS cluster. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to an EDS and DNS cluster. Also + // configure an endpoints resource for the EDS cluster. 
+ const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsClusterName, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) + + // Make an RPC and ensure that it gets routed to the first backend since the + // EDS cluster is of higher priority than the LOGICAL_DNS cluster. 
+ client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Push an EDS resource from the management server that is expected to be + // NACKed by the xDS client. Since the cluster_resolver LB policy has a + // previously received good EDS resource, it will continue to use that. + resources.Endpoints[0].Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0} + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs continue to get routed to the EDS cluster for the next + // second. + for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + } +} + +// TestAggregateCluster_Fallback_EDSNackedWithoutPreviousGoodUpdate tests the +// scenario where the top-level cluster is an aggregate cluster that resolves to +// an EDS and LOGICAL_DNS cluster. The management server sends a bad EDS +// response. The test verifies that the cluster_resolver LB policy falls back to +// the LOGICAL_DNS cluster, because it is supposed to treat the bad EDS response +// as though it received an update with no endpoints. +func (s) TestAggregateCluster_Fallback_EDSNackedWithoutPreviousGoodUpdate(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. 
+ mgmtServer, nodeID, bootstrapContents, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS cluster and the second backend is used for + // the LOGICAL_DNS cluster. + servers, cleanup3 := startTestServiceBackends(t, 2) + defer cleanup3() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure an aggregate cluster pointing to an EDS and DNS cluster. + const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsClusterName, "localhost", []uint32{uint32(ports[0])})}, + SkipValidation: true, + } + + // Set a load balancing weight of 0 for the backend in the EDS resource. + // This is expected to be NACKed by the xDS client. Since the + // cluster_resolver LB policy has no previously received good EDS resource, + // it will treat this as though it received an update with no endpoints. + resources.Endpoints[0].Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. 
+ cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: addrs[1:]}) + + // Make an RPC and ensure that it gets routed to the LOGICAL_DNS cluster. + peer := &peer.Peer{} + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[1].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[1].Addr) + } +} + +// TestAggregateCluster_Fallback_EDS_ResourceNotFound tests the scenario where +// the top-level cluster is an aggregate cluster that resolves to an EDS and +// LOGICAL_DNS cluster. The management server does not respond with the EDS +// cluster. The test verifies that the cluster_resolver LB policy falls back to +// the LOGICAL_DNS cluster in this case. +func (s) TestAggregateCluster_Fallback_EDS_ResourceNotFound(t *testing.T) { + dnsTargetCh, _, _, dnsR, cleanup1 := setupDNS() + defer cleanup1() + + // Start an xDS management server. + mgmtServer, nodeID, _, _, cleanup2 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup2() + + // Start a test backend for the LOGICAL_DNS cluster. + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure an aggregate cluster pointing to an EDS and DNS cluster. No + // endpoints are configured for the EDS cluster. 
+ const ( + edsClusterName = clusterName + "-eds" + dnsClusterName = clusterName + "-dns" + dnsHostName = "dns_host" + dnsPort = uint32(8080) + ) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + makeAggregateClusterResource(clusterName, []string{edsClusterName, dnsClusterName}), + e2e.DefaultCluster(edsClusterName, "", e2e.SecurityLevelNone), + makeLogicalDNSClusterResource(dnsClusterName, dnsHostName, dnsPort), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client talking to the above management server, configured + // with a short watch expiry timeout. + xdsClient, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Create a manual resolver and push a service config specifying the use of + // the cds LB policy as the top-level LB policy, and a corresponding config + // with a single cluster. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cds_experimental":{ + "cluster": "%s" + } + }] + }`, clusterName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) + + // Create a ClientConn. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Make an RPC with a short deadline. 
We expect this RPC to not succeed + // because the DNS resolver has not responded with endpoint addresses. + client := testgrpc.NewTestServiceClient(cc) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); status.Code(err) != codes.DeadlineExceeded { + t.Fatalf("EmptyCall() code %s, want %s", status.Code(err), codes.DeadlineExceeded) + } + + // Ensure that the DNS resolver is started for the expected target. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for DNS resolver to be started") + case target := <-dnsTargetCh: + got, want := target.Endpoint(), fmt.Sprintf("%s:%d", dnsHostName, dnsPort) + if got != want { + t.Fatalf("DNS resolution started for target %q, want %q", got, want) + } + } + + // Update DNS resolver with test backend addresses. + dnsR.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: server.Address}}}) + + // Make an RPC and ensure that it gets routed to the LOGICAL_DNS cluster. + // Even though the EDS cluster is of higher priority, since the management + // server does not respond with an EDS resource, the cluster_resolver LB + // policy is expected to fallback to the LOGICAL_DNS cluster once the watch + // timeout expires. + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer), grpc.WaitForReady(true)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != server.Address { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, server.Address) + } +} diff --git a/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go new file mode 100644 index 000000000000..69b7c51cf8fa --- /dev/null +++ b/xds/internal/balancer/clusterresolver/e2e_test/balancer_test.go @@ -0,0 +1,527 @@ +/* + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2e_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/buffer" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" + "google.golang.org/grpc/xds/internal/balancer/outlierdetection" + "google.golang.org/grpc/xds/internal/balancer/priority" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb 
"github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the "cds_experimental" LB policy. +) + +// setupAndDial performs common setup across all tests +// +// - creates an xDS client with the passed in bootstrap contents +// - creates a manual resolver that configures `cds_experimental` as the +// top-level LB policy. +// - creates a ClientConn to talk to the test backends +// +// Returns a function to close the ClientConn and the xDS client. +func setupAndDial(t *testing.T, bootstrapContents []byte) (*grpc.ClientConn, func()) { + t.Helper() + + // Create an xDS client for use by the cluster_resolver LB policy. + xdsC, xdsClose, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + + // Create a manual resolver and push a service config specifying the use of + // the cds LB policy as the top-level LB policy, and a corresponding config + // with a single cluster. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cds_experimental":{ + "cluster": "%s" + } + }] + }`, clusterName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsC)) + + // Create a ClientConn and make a successful RPC. 
+ cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + xdsClose() + t.Fatalf("Failed to dial local test server: %v", err) + } + return cc, func() { + xdsClose() + cc.Close() + } +} + +// TestErrorFromParentLB_ConnectionError tests the case where the parent of the +// clusterresolver LB policy sends its a connection error. The parent policy, +// CDS LB policy, sends a connection error when the ADS stream to the management +// server breaks. The test verifies that there is no perceivable effect because +// of this connection error, and that RPCs continue to work (because the LB +// policies are expected to use previously received xDS resources). +func (s) TestErrorFromParentLB_ConnectionError(t *testing.T) { + // Create a listener to be used by the management server. The test will + // close this listener to simulate ADS stream breakage. + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + + // Start an xDS management server with the above restartable listener, and + // push a channel when the stream is closed. + streamClosedCh := make(chan struct{}, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + Listener: lis, + OnStreamClosed: func(int64, *v3corepb.Node) { + select { + case streamClosedCh <- struct{}{}: + default: + } + }, + }) + defer cleanup() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure cluster and endpoints resources in the management server. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + + // Close the listener and ensure that the ADS stream breaks. + lis.Close() + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for ADS stream to close") + default: + } + + // Ensure that RPCs continue to succeed for the next second. + for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + } +} + +// TestErrorFromParentLB_ResourceNotFound tests the case where the parent of the +// clusterresolver LB policy sends it a resource-not-found error. The parent +// policy, CDS LB policy, sends a resource-not-found error when the cluster +// resource associated with these LB policies is removed by the management +// server. The test verifies that the associated EDS is canceled and RPCs fail. +// It also ensures that when the Cluster resource is added back, the EDS +// resource is re-requested and RPCs being to succeed. 
+func (s) TestErrorFromParentLB_ResourceNotFound(t *testing.T) { + // Start an xDS management server that uses a couple of channels to + // notify the test about the following events: + // - an EDS requested with the expected resource name is requested + // - EDS resource is unrequested, i.e, an EDS request with no resource name + // is received, which indicates that we are not longer interested in that + // resource. + edsResourceRequestedCh := make(chan struct{}, 1) + edsResourceCanceledCh := make(chan struct{}, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() == version.V3EndpointsURL { + switch len(req.GetResourceNames()) { + case 0: + select { + case edsResourceCanceledCh <- struct{}{}: + default: + } + case 1: + if req.GetResourceNames()[0] == edsServiceName { + select { + case edsResourceRequestedCh <- struct{}{}: + default: + } + } + default: + t.Errorf("Unexpected number of resources, %d, in an EDS request", len(req.GetResourceNames())) + } + } + return nil + }, + }) + defer cleanup() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure cluster and endpoints resources in the management server. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. 
+ cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + // Wait for the EDS resource to be requested. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS resource to be requested") + case <-edsResourceRequestedCh: + } + + // Ensure that a successful RPC can be made. + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + + // Delete the cluster resource from the mangement server. + resources.Clusters = nil + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for the EDS resource to be not requested anymore. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS resource to not requested") + case <-edsResourceCanceledCh: + } + + // Ensure that RPCs start to fail with expected error. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + _, err := client.EmptyCall(sCtx, &testpb.Empty{}) + if status.Code(err) == codes.Unavailable && strings.Contains(err.Error(), "all priorities are removed") { + break + } + if err != nil { + t.Logf("EmptyCall failed: %v", err) + } + } + if ctx.Err() != nil { + t.Fatalf("RPCs did not fail after removal of Cluster resource") + } + + // Ensure that the ClientConn is in TransientFailure. + if state := cc.GetState(); state != connectivity.TransientFailure { + t.Fatalf("Unexpected connectivity state for ClientConn, got: %s, want %s", state, connectivity.TransientFailure) + } + + // Configure cluster and endpoints resources in the management server. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for the EDS resource to be requested again. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS resource to be requested") + case <-edsResourceRequestedCh: + } + + // Ensure that a successful RPC can be made. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := client.EmptyCall(sCtx, &testpb.Empty{}); err != nil { + t.Logf("EmptyCall failed: %v", err) + continue + } + break + } + if ctx.Err() != nil { + t.Fatalf("RPCs did not fail after removal of Cluster resource") + } +} + +// wrappedPriorityBuilder implements the balancer.Builder interface and builds +// an LB policy which is a thin wrapper around the priority LB policy. The built +// LB policy and makes certain events available to the test (SubConn state +// changes and LB config updates). +type wrappedPriorityBuilder struct { + balancer.Builder + balancer.ConfigParser + // We use an unbounded buffer instead of a vanilla channel to ensure that no + // state updates are lost *and* pushing to the channel is non-blocking (to + // ensure that the sending goroutine does not block if the test is not + // reading from the channel). 
+ scStateCh *buffer.Unbounded + lbCfgCh chan serviceconfig.LoadBalancingConfig +} + +func newWrappedPriorityBuilder(b balancer.Builder) *wrappedPriorityBuilder { + return &wrappedPriorityBuilder{ + scStateCh: buffer.NewUnbounded(), + lbCfgCh: make(chan serviceconfig.LoadBalancingConfig, 1), + Builder: b, + ConfigParser: b.(balancer.ConfigParser), + } +} + +func (b *wrappedPriorityBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + priorityLB := b.Builder.Build(cc, opts) + return &wrappedPriorityBalancer{ + Balancer: priorityLB, + scStateCh: b.scStateCh, + lbCfgCh: b.lbCfgCh, + } +} + +type wrappedPriorityBalancer struct { + balancer.Balancer + scStateCh *buffer.Unbounded + lbCfgCh chan serviceconfig.LoadBalancingConfig +} + +func (b *wrappedPriorityBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.scStateCh.Put(state) + b.Balancer.UpdateSubConnState(sc, state) +} + +func (b *wrappedPriorityBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + select { + case b.lbCfgCh <- ccs.BalancerConfig: + default: + } + return b.Balancer.UpdateClientConnState(ccs) +} + +func (b *wrappedPriorityBalancer) Close() { + b.scStateCh.Close() + b.Balancer.Close() +} + +// Test verifies that SubConn state changes are propagated to the child policy +// by the cluster resolver LB policy. +func (s) TestSubConnStateChangePropagationToChildPolicy(t *testing.T) { + // Unregister the priority balancer builder for the duration of this test, + // and register a policy under the same name that makes SubConn state + // changes pushed to it available to the test. 
+ priorityBuilder := balancer.Get(priority.Name) + internal.BalancerUnregister(priorityBuilder.Name()) + testChildPolicy := newWrappedPriorityBuilder(priorityBuilder) + balancer.Register(testChildPolicy) + defer balancer.Register(priorityBuilder) + + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure cluster and endpoints resources in the management server. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + + for { + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for child policy to see a READY SubConn") + case s := <-testChildPolicy.scStateCh.Get(): + testChildPolicy.scStateCh.Load() + state := s.(balancer.SubConnState) + if state.ConnectivityState == connectivity.Ready { + return + } + } + } +} + +// Test verifies that when the received Cluster resource contains outlier +// detection configuration, the LB config pushed to the child policy contains +// the appropriate configuration for the outlier detection LB policy. 
+func (s) TestOutlierDetectionConfigPropagationToChildPolicy(t *testing.T) { + // Unregister the priority balancer builder for the duration of this test, + // and register a policy under the same name that makes the LB config + // pushed to it available to the test. + priorityBuilder := balancer.Get(priority.Name) + internal.BalancerUnregister(priorityBuilder.Name()) + testChildPolicy := newWrappedPriorityBuilder(priorityBuilder) + balancer.Register(testChildPolicy) + defer balancer.Register(priorityBuilder) + + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure cluster and endpoints resources in the management server. + cluster := e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone) + cluster.OutlierDetection = &v3clusterpb.OutlierDetection{ + Interval: durationpb.New(10 * time.Second), + BaseEjectionTime: durationpb.New(30 * time.Second), + MaxEjectionTime: durationpb.New(300 * time.Second), + MaxEjectionPercent: wrapperspb.UInt32(10), + SuccessRateStdevFactor: wrapperspb.UInt32(2000), + EnforcingSuccessRate: wrapperspb.UInt32(50), + SuccessRateMinimumHosts: wrapperspb.UInt32(10), + SuccessRateRequestVolume: wrapperspb.UInt32(50), + } + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{cluster}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. 
+ _, cleanup = setupAndDial(t, bootstrapContents) + defer cleanup() + + // The priority configuration generated should have Outlier Detection as a + // direct child due to Outlier Detection being turned on. + wantCfg := &priority.LBConfig{ + Children: map[string]*priority.Child{ + "priority-0-0": { + Config: &iserviceconfig.BalancerConfig{ + Name: outlierdetection.Name, + Config: &outlierdetection.LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), // default interval + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + SuccessRateEjection: &outlierdetection.SuccessRateEjection{ + StdevFactor: 2000, + EnforcementPercentage: 50, + MinimumHosts: 10, + RequestVolume: 50, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: clusterimpl.Name, + Config: &clusterimpl.LBConfig{ + Cluster: clusterName, + EDSServiceName: edsServiceName, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }, + }, + }, + }, + }, + IgnoreReresolutionRequests: true, + }, + }, + Priorities: []string{"priority-0-0"}, + } + + select { + case lbCfg := <-testChildPolicy.lbCfgCh: + gotCfg := lbCfg.(*priority.LBConfig) + if diff := cmp.Diff(wantCfg, gotCfg); diff != "" { + t.Fatalf("Child policy received unexpected diff in config (-want +got):\n%s", diff) + } + case <-ctx.Done(): + t.Fatalf("Timeout when waiting for child policy to receive its configuration") + } +} diff --git a/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go new file mode 100644 index 000000000000..f2089e9640a0 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/e2e_test/eds_impl_test.go @@ -0,0 +1,1100 @@ +/* + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2e_test + +import ( + "context" + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancergroup" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/internal/testutils" + rrutil "google.golang.org/grpc/internal/testutils/roundrobin" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + "google.golang.org/grpc/xds/internal/balancer/priority" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb 
"github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + + _ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the "cluster_resolver_experimental" LB policy. +) + +const ( + clusterName = "cluster-my-service-client-side-xds" + edsServiceName = "endpoints-my-service-client-side-xds" + localityName1 = "my-locality-1" + localityName2 = "my-locality-2" + localityName3 = "my-locality-3" + + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond + defaultTestWatchExpiryTimeout = 500 * time.Millisecond +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// backendAddressesAndPorts extracts the address and port of each of the +// StubServers passed in and returns them. Fails the test if any of the +// StubServers passed have an invalid address. +func backendAddressesAndPorts(t *testing.T, servers []*stubserver.StubServer) ([]resolver.Address, []uint32) { + addrs := make([]resolver.Address, len(servers)) + ports := make([]uint32, len(servers)) + for i := 0; i < len(servers); i++ { + addrs[i] = resolver.Address{Addr: servers[i].Address} + ports[i] = testutils.ParsePort(t, servers[i].Address) + } + return addrs, ports +} + +func startTestServiceBackends(t *testing.T, numBackends int) ([]*stubserver.StubServer, func()) { + var servers []*stubserver.StubServer + for i := 0; i < numBackends; i++ { + servers = append(servers, stubserver.StartTestService(t, nil)) + servers[i].StartServer() + } + + return servers, func() { + for _, server := range servers { + server.Stop() + } + } +} + +// clientEndpointsResource returns an EDS resource for the specified nodeID, +// service name and localities. 
+func clientEndpointsResource(nodeID, edsServiceName string, localities []e2e.LocalityOptions) e2e.UpdateOptions { + return e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.EndpointResourceWithOptions(e2e.EndpointOptions{ + ClusterName: edsServiceName, + Host: "localhost", + Localities: localities, + })}, + SkipValidation: true, + } +} + +// TestEDS_OneLocality tests the cluster_resolver LB policy using an EDS +// resource with one locality. The following scenarios are tested: +// 1. Single backend. Test verifies that RPCs reach this backend. +// 2. Add a backend. Test verifies that RPCs are roundrobined across the two +// backends. +// 3. Remove one backend. Test verifies that all RPCs reach the other backend. +// 4. Replace the backend. Test verifies that all RPCs reach the new backend. +func (s) TestEDS_OneLocality(t *testing.T) { + // Spin up a management server to receive xDS resources from. + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + // Start backend servers which provide an implementation of the TestService. + servers, cleanup2 := startTestServiceBackends(t, 3) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Create xDS resources for consumption by the test. We start off with a + // single backend in a single EDS locality. + resources := clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[0]}}, + }}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. 
+ client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a manual resolver and push a service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, client)) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Ensure RPCs are being roundrobined across the single backend. + testClient := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[:1]); err != nil { + t.Fatal(err) + } + + // Add a backend to the same locality, and ensure RPCs are sent in a + // roundrobin fashion across the two backends. + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[0]}, {Port: ports[1]}}, + }}) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[:2]); err != nil { + t.Fatal(err) + } + + // Remove the first backend, and ensure all RPCs are sent to the second + // backend. 
+ resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[1]}}, + }}) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[1:2]); err != nil { + t.Fatal(err) + } + + // Replace the backend, and ensure all RPCs are sent to the new backend. + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[2]}}, + }}) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[2:3]); err != nil { + t.Fatal(err) + } +} + +// TestEDS_MultipleLocalities tests the cluster_resolver LB policy using an EDS +// resource with multiple localities. The following scenarios are tested: +// 1. Two localities, each with a single backend. Test verifies that RPCs are +// weighted roundrobined across these two backends. +// 2. Add another locality, with a single backend. Test verifies that RPCs are +// weighted roundrobined across all the backends. +// 3. Remove one locality. Test verifies that RPCs are weighted roundrobined +// across backends from the remaining localities. +// 4. Add a backend to one locality. Test verifies that RPCs are weighted +// roundrobined across localities. +// 5. Change the weight of one of the localities. Test verifies that RPCs are +// weighted roundrobined across the localities. +// +// In our LB policy tree, one of the descendents of the "cluster_resolver" LB +// policy is the "weighted_target" LB policy which performs weighted roundrobin +// across localities (and this has a randomness component associated with it). +// Therefore, the moment we have backends from more than one locality, RPCs are +// weighted roundrobined across them. 
+func (s) TestEDS_MultipleLocalities(t *testing.T) { + // Spin up a management server to receive xDS resources from. + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + // Start backend servers which provide an implementation of the TestService. + servers, cleanup2 := startTestServiceBackends(t, 4) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Create xDS resources for consumption by the test. We start off with two + // localities, and single backend in each of them. + resources := clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{ + { + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[0]}}, + }, + { + Name: localityName2, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[1]}}, + }, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a manual resolver and push service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. 
+ r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, client)) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Ensure RPCs are being weighted roundrobined across the two backends. + testClient := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, addrs[0:2]); err != nil { + t.Fatal(err) + } + + // Add another locality with a single backend, and ensure RPCs are being + // weighted roundrobined across the three backends. + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{ + { + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[0]}}, + }, + { + Name: localityName2, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[1]}}, + }, + { + Name: localityName3, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[2]}}, + }, + }) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, addrs[0:3]); err != nil { + t.Fatal(err) + } + + // Remove the first locality, and ensure RPCs are being weighted + // roundrobined across the remaining two backends. 
+ resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{ + { + Name: localityName2, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[1]}}, + }, + { + Name: localityName3, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[2]}}, + }, + }) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, addrs[1:3]); err != nil { + t.Fatal(err) + } + + // Add a backend to one locality, and ensure weighted roundrobin. Since RPCs + // are roundrobined across localities, locality2's backend will receive + // twice the traffic. + resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{ + { + Name: localityName2, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[1]}}, + }, + { + Name: localityName3, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: ports[2]}, {Port: ports[3]}}, + }, + }) + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + wantAddrs := []resolver.Address{addrs[1], addrs[1], addrs[2], addrs[3]} + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, wantAddrs); err != nil { + t.Fatal(err) + } +} + +// TestEDS_EndpointsHealth tests the cluster_resolver LB policy using an EDS +// resource which specifies endpoint health information and verifies that +// traffic is routed only to backends deemed capable of receiving traffic. +func (s) TestEDS_EndpointsHealth(t *testing.T) { + // Spin up a management server to receive xDS resources from. + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + // Start backend servers which provide an implementation of the TestService. + servers, cleanup2 := startTestServiceBackends(t, 12) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Create xDS resources for consumption by the test. 
Two localities with + // six backends each, with two of the six backends being healthy. Both + // UNKNOWN and HEALTHY are considered by gRPC for load balancing. + resources := clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{ + { + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{ + {Port: ports[0], HealthStatus: v3corepb.HealthStatus_UNKNOWN}, + {Port: ports[1], HealthStatus: v3corepb.HealthStatus_HEALTHY}, + {Port: ports[2], HealthStatus: v3corepb.HealthStatus_UNHEALTHY}, + {Port: ports[3], HealthStatus: v3corepb.HealthStatus_DRAINING}, + {Port: ports[4], HealthStatus: v3corepb.HealthStatus_TIMEOUT}, + {Port: ports[5], HealthStatus: v3corepb.HealthStatus_DEGRADED}, + }, + }, + { + Name: localityName2, + Weight: 1, + Backends: []e2e.BackendOptions{ + {Port: ports[6], HealthStatus: v3corepb.HealthStatus_UNKNOWN}, + {Port: ports[7], HealthStatus: v3corepb.HealthStatus_HEALTHY}, + {Port: ports[8], HealthStatus: v3corepb.HealthStatus_UNHEALTHY}, + {Port: ports[9], HealthStatus: v3corepb.HealthStatus_DRAINING}, + {Port: ports[10], HealthStatus: v3corepb.HealthStatus_TIMEOUT}, + {Port: ports[11], HealthStatus: v3corepb.HealthStatus_DEGRADED}, + }, + }, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a manual resolver and push service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. 
+ r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, client)) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Ensure RPCs are being weighted roundrobined across healthy backends from + // both localities. + testClient := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckWeightedRoundRobinRPCs(ctx, testClient, append(addrs[0:2], addrs[6:8]...)); err != nil { + t.Fatal(err) + } +} + +// TestEDS_EmptyUpdate tests the cluster_resolver LB policy using an EDS +// resource with no localities and verifies that RPCs fail with "all priorities +// removed" error. +func (s) TestEDS_EmptyUpdate(t *testing.T) { + // Spin up a management server to receive xDS resources from. + managementServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + // Start backend servers which provide an implementation of the TestService. 
+ servers, cleanup2 := startTestServiceBackends(t, 4) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + oldCacheTimeout := balancergroup.DefaultSubBalancerCloseTimeout + balancergroup.DefaultSubBalancerCloseTimeout = 100 * time.Microsecond + defer func() { balancergroup.DefaultSubBalancerCloseTimeout = oldCacheTimeout }() + + // Create xDS resources for consumption by the test. The first update is an + // empty update. This should put the channel in TRANSIENT_FAILURE. + resources := clientEndpointsResource(nodeID, edsServiceName, nil) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a manual resolver and push service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, client)) + + // Create a ClientConn and ensure that RPCs fail with "all priorities + // removed" error. This is the expected error when the cluster_resolver LB + // policy receives an EDS update with no localities. 
+	cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r))
+	if err != nil {
+		t.Fatalf("failed to dial local test server: %v", err)
+	}
+	defer cc.Close()
+	testClient := testgrpc.NewTestServiceClient(cc)
+	if err := waitForAllPrioritiesRemovedError(ctx, t, testClient); err != nil {
+		t.Fatal(err)
+	}
+
+	// Add a locality with one backend and ensure RPCs are successful.
+	resources = clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{
+		Name:     localityName1,
+		Weight:   1,
+		Backends: []e2e.BackendOptions{{Port: ports[0]}},
+	}})
+	if err := managementServer.Update(ctx, resources); err != nil {
+		t.Fatal(err)
+	}
+	if err := rrutil.CheckRoundRobinRPCs(ctx, testClient, addrs[:1]); err != nil {
+		t.Fatal(err)
+	}
+
+	// Push another empty update and ensure that RPCs fail with "all priorities
+	// removed" error again.
+	resources = clientEndpointsResource(nodeID, edsServiceName, nil)
+	if err := managementServer.Update(ctx, resources); err != nil {
+		t.Fatal(err)
+	}
+	if err := waitForAllPrioritiesRemovedError(ctx, t, testClient); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestEDS_ResourceRemoved tests the case where the EDS resource requested by
+// the clusterresolver LB policy is removed from the management server. The test
+// verifies that the EDS watch is not canceled and that RPCs continue to succeed
+// with the previously received configuration.
+func (s) TestEDS_ResourceRemoved(t *testing.T) {
+	// Start an xDS management server that uses a couple of channels to
+	// notify the test about the following events:
+	// - an EDS resource with the expected name is requested
+	// - EDS resource is unrequested, i.e., an EDS request with no resource name
+	//   is received, which indicates that we are no longer interested in that
+	//   resource.
+	edsResourceRequestedCh := make(chan struct{}, 1)
+	edsResourceCanceledCh := make(chan struct{}, 1)
+	managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{
+		OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error {
+			if req.GetTypeUrl() == version.V3EndpointsURL {
+				switch len(req.GetResourceNames()) {
+				case 0:
+					select {
+					case edsResourceCanceledCh <- struct{}{}:
+					default:
+					}
+				case 1:
+					if req.GetResourceNames()[0] == edsServiceName {
+						select {
+						case edsResourceRequestedCh <- struct{}{}:
+						default:
+						}
+					}
+				default:
+					t.Errorf("Unexpected number of resources, %d, in an EDS request", len(req.GetResourceNames()))
+				}
+			}
+			return nil
+		},
+	})
+	defer cleanup()
+
+	server := stubserver.StartTestService(t, nil)
+	defer server.Stop()
+
+	// Configure cluster and endpoints resources in the management server.
+	resources := e2e.UpdateOptions{
+		NodeID:         nodeID,
+		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)},
+		Endpoints:      []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})},
+		SkipValidation: true,
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	if err := managementServer.Update(ctx, resources); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create xDS client, configure cds_experimental LB policy with a manual
+	// resolver, and dial the test backends.
+	cc, cleanup := setupAndDial(t, bootstrapContents)
+	defer cleanup()
+
+	client := testgrpc.NewTestServiceClient(cc)
+	if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil {
+		t.Fatalf("EmptyCall() failed: %v", err)
+	}
+
+	// Delete the endpoints resource from the management server.
+ resources.Endpoints = nil + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs continue to succeed for the next second, and that the + // EDS watch is not canceled. + for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + select { + case <-edsResourceCanceledCh: + t.Fatal("EDS watch canceled when not expected to be canceled") + default: + } + } +} + +// TestEDS_ClusterResourceDoesNotContainEDSServiceName tests the case where the +// Cluster resource sent by the management server does not contain an EDS +// service name. The test verifies that the cluster_resolver LB policy uses the +// cluster name for the EDS resource. +func (s) TestEDS_ClusterResourceDoesNotContainEDSServiceName(t *testing.T) { + edsResourceCh := make(chan string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) > 0 { + select { + case edsResourceCh <- req.GetResourceNames()[0]: + default: + } + } + return nil + }, + }) + defer cleanup() + + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Configure cluster and endpoints resources with the same name in the management server. The cluster resource does not specify an EDS service name. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, "", e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(clusterName, "localhost", []uint32{testutils.ParsePort(t, server.Address)})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. + cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS request to be received on the management server") + case name := <-edsResourceCh: + if name != clusterName { + t.Fatalf("Received EDS request with resource name %q, want %q", name, clusterName) + } + } +} + +// TestEDS_ClusterResourceUpdates verifies different scenarios with regards to +// cluster resource updates. +// +// - The first cluster resource contains an eds_service_name. The test verifies +// that an EDS request is sent for the received eds_service_name. It also +// verifies that a subsequent RPC gets routed to a backend belonging to that +// service name. +// - The next cluster resource update contains no eds_service_name. The test +// verifies that a subsequent EDS request is sent for the cluster_name and +// that the previously received eds_service_name is no longer requested. It +// also verifies that a subsequent RPC gets routed to a backend belonging to +// the service represented by the cluster_name. +// - The next cluster resource update changes the circuit breaking +// configuration, but does not change the service name. 
The test verifies +// that a subsequent RPC gets routed to the same backend as before. +func (s) TestEDS_ClusterResourceUpdates(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Start an xDS management server that pushes the EDS resource names onto a + // channel. + edsResourceNameCh := make(chan []string, 1) + managementServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() != version.V3EndpointsURL { + return nil + } + if len(req.GetResourceNames()) == 0 { + // This is the case for ACKs. Do nothing here. + return nil + } + select { + case <-ctx.Done(): + case edsResourceNameCh <- req.GetResourceNames(): + } + return nil + }, + AllowResourceSubset: true, + }) + defer cleanup() + + // Start two test backends and extract their host and port. The first + // backend is used for the EDS resource identified by the eds_service_name, + // and the second backend is used for the EDS resource identified by the + // cluster_name. + servers, cleanup2 := startTestServiceBackends(t, 2) + defer cleanup2() + addrs, ports := backendAddressesAndPorts(t, servers) + + // Configure cluster and endpoints resources in the management server. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, edsServiceName, e2e.SecurityLevelNone)}, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{ + e2e.DefaultEndpoint(edsServiceName, "localhost", []uint32{uint32(ports[0])}), + e2e.DefaultEndpoint(clusterName, "localhost", []uint32{uint32(ports[1])}), + }, + SkipValidation: true, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create xDS client, configure cds_experimental LB policy with a manual + // resolver, and dial the test backends. 
+ cc, cleanup := setupAndDial(t, bootstrapContents) + defer cleanup() + + client := testgrpc.NewTestServiceClient(cc) + peer := &peer.Peer{} + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[0].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[0].Addr) + } + + // Ensure EDS watch is registered for eds_service_name. + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for EDS request to be received on the management server") + case names := <-edsResourceNameCh: + if !cmp.Equal(names, []string{edsServiceName}) { + t.Fatalf("Received EDS request with resource names %v, want %v", names, []string{edsServiceName}) + } + } + + // Change the cluster resource to not contain an eds_service_name. + resources.Clusters = []*v3clusterpb.Cluster{e2e.DefaultCluster(clusterName, "", e2e.SecurityLevelNone)} + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that an EDS watch for eds_service_name is canceled and new watch + // for cluster_name is registered. The actual order in which this happens is + // not deterministic, i.e the watch for old resource could be canceled + // before the new one is registered or vice-versa. In either case, + // eventually, we want to see a request to the management server for just + // the cluster_name. + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + names := <-edsResourceNameCh + if cmp.Equal(names, []string{clusterName}) { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for old EDS watch %q to be canceled and new one %q to be registered", edsServiceName, clusterName) + } + + // Make a RPC, and ensure that it gets routed to second backend, + // corresponding to the cluster_name. 
+ for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + continue + } + if peer.Addr.String() == addrs[1].Addr { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Timeout when waiting for EmptyCall() to be routed to correct backend %q", addrs[1].Addr) + } + + // Change cluster resource circuit breaking count. + resources.Clusters[0].CircuitBreakers = &v3clusterpb.CircuitBreakers{ + Thresholds: []*v3clusterpb.CircuitBreakers_Thresholds{ + { + Priority: v3corepb.RoutingPriority_DEFAULT, + MaxRequests: wrapperspb.UInt32(512), + }, + }, + } + if err := managementServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs continue to get routed to the second backend for the + // next second. + for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(peer)); err != nil { + t.Fatalf("EmptyCall() failed: %v", err) + } + if peer.Addr.String() != addrs[1].Addr { + t.Fatalf("EmptyCall() routed to backend %q, want %q", peer.Addr, addrs[1].Addr) + } + } +} + +// TestEDS_BadUpdateWithoutPreviousGoodUpdate tests the case where the +// management server sends a bad update (one that is NACKed by the xDS client). +// Since the cluster_resolver LB policy does not have a previously received good +// update, it is expected to treat this bad update as though it received an +// update with no endpoints. Hence RPCs are expected to fail with "all +// priorities removed" error. +func (s) TestEDS_BadUpdateWithoutPreviousGoodUpdate(t *testing.T) { + // Spin up a management server to receive xDS resources from. + mgmtServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + // Start a backend server that implements the TestService. 
+ server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Create an EDS resource with a load balancing weight of 0. This will + // result in the resource being NACKed by the xDS client. Since the + // cluster_resolver LB policy does not have a previously received good EDS + // update, it should treat this update as an empty EDS update. + resources := clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: testutils.ParsePort(t, server.Address)}}, + }}) + resources.Endpoints[0].Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. + xdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a manual resolver and push a service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) + + // Create a ClientConn and verify that RPCs fail with "all priorities + // removed" error. 
+ cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + client := testgrpc.NewTestServiceClient(cc) + if err := waitForAllPrioritiesRemovedError(ctx, t, client); err != nil { + t.Fatal(err) + } +} + +// TestEDS_BadUpdateWithPreviousGoodUpdate tests the case where the +// cluster_resolver LB policy receives a good EDS update from the management +// server and the test verifies that RPCs are successful. Then, a bad update is +// received from the management server (one that is NACKed by the xDS client). +// The test verifies that the previously received good update is still being +// used and that RPCs are still successful. +func (s) TestEDS_BadUpdateWithPreviousGoodUpdate(t *testing.T) { + // Spin up a management server to receive xDS resources from. + mgmtServer, nodeID, bootstrapContents, _, cleanup1 := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup1() + + // Start a backend server that implements the TestService. + server := stubserver.StartTestService(t, nil) + defer server.Stop() + + // Create an EDS resource for consumption by the test. + resources := clientEndpointsResource(nodeID, edsServiceName, []e2e.LocalityOptions{{ + Name: localityName1, + Weight: 1, + Backends: []e2e.BackendOptions{{Port: testutils.ParsePort(t, server.Address)}}, + }}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Create an xDS client for use by the cluster_resolver LB policy. 
+ xdsClient, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Create a manual resolver and push a service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) + + // Create a ClientConn and make a successful RPC. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Ensure RPCs are being roundrobined across the single backend. + client := testgrpc.NewTestServiceClient(cc) + if err := rrutil.CheckRoundRobinRPCs(ctx, client, []resolver.Address{{Addr: server.Address}}); err != nil { + t.Fatal(err) + } + + // Update the endpoints resource in the management server with a load + // balancing weight of 0. This will result in the resource being NACKed by + // the xDS client. But since the cluster_resolver LB policy has a previously + // received good EDS update, it should continue using it. + resources.Endpoints[0].Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0} + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure that RPCs continue to succeed for the next second. 
+ for end := time.Now().Add(time.Second); time.Now().Before(end); <-time.After(defaultTestShortTimeout) { + if err := rrutil.CheckRoundRobinRPCs(ctx, client, []resolver.Address{{Addr: server.Address}}); err != nil { + t.Fatal(err) + } + } +} + +// TestEDS_ResourceNotFound tests the case where the requested EDS resource does +// not exist on the management server. Once the watch timer associated with the +// requested resource expires, the cluster_resolver LB policy receives a +// "resource-not-found" callback from the xDS client and is expected to treat it +// as though it received an update with no endpoints. Hence RPCs are expected to +// fail with "all priorities removed" error. +func (s) TestEDS_ResourceNotFound(t *testing.T) { + // Spin up a management server to receive xDS resources from. + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create an xDS client talking to the above management server, configured + // with a short watch expiry timeout. + nodeID := uuid.New().String() + xdsClient, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Configure no resources on the management server. 
+ resources := e2e.UpdateOptions{NodeID: nodeID, SkipValidation: true} + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Create a manual resolver and push a service config specifying the use of + // the cluster_resolver LB policy with a single discovery mechanism. + r := manual.NewBuilderWithScheme("whatever") + jsonSC := fmt.Sprintf(`{ + "loadBalancingConfig":[{ + "cluster_resolver_experimental":{ + "discoveryMechanisms": [{ + "cluster": "%s", + "type": "EDS", + "edsServiceName": "%s", + "outlierDetection": {} + }], + "xdsLbPolicy":[{"round_robin":{}}] + } + }] + }`, clusterName, edsServiceName) + scpr := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) + r.InitialState(xdsclient.SetClient(resolver.State{ServiceConfig: scpr}, xdsClient)) + + // Create a ClientConn and verify that RPCs fail with "all priorities + // removed" error. + cc, err := grpc.Dial(r.Scheme()+":///test.service", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(r)) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + client := testgrpc.NewTestServiceClient(cc) + if err := waitForAllPrioritiesRemovedError(ctx, t, client); err != nil { + t.Fatal(err) + } +} + +// waitForAllPrioritiesRemovedError repeatedly makes RPCs using the +// TestServiceClient until they fail with an error which indicates that all +// priorities have been removed. A non-nil error is returned if the context +// expires before RPCs fail with the expected error. 
+func waitForAllPrioritiesRemovedError(ctx context.Context, t *testing.T, client testgrpc.TestServiceClient) error { + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + _, err := client.EmptyCall(ctx, &testpb.Empty{}) + if err == nil { + t.Log("EmptyCall() succeeded after EDS update with no localities") + continue + } + if code := status.Code(err); code != codes.Unavailable { + t.Logf("EmptyCall() returned code: %v, want: %v", code, codes.Unavailable) + continue + } + if !strings.Contains(err.Error(), priority.ErrAllPrioritiesRemoved.Error()) { + t.Logf("EmptyCall() = %v, want %v", err, priority.ErrAllPrioritiesRemoved) + continue + } + return nil + } + return errors.New("timeout when waiting for RPCs to fail with UNAVAILABLE status and priority.ErrAllPrioritiesRemoved error") +} diff --git a/xds/internal/client/logging.go b/xds/internal/balancer/clusterresolver/logging.go similarity index 84% rename from xds/internal/client/logging.go rename to xds/internal/balancer/clusterresolver/logging.go index bff3fb1d3df3..728f1f709c28 100644 --- a/xds/internal/client/logging.go +++ b/xds/internal/balancer/clusterresolver/logging.go @@ -16,7 +16,7 @@ * */ -package client +package clusterresolver import ( "fmt" @@ -25,10 +25,10 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const prefix = "[xds-client %p] " +const prefix = "[xds-cluster-resolver-lb %p] " var logger = grpclog.Component("xds") -func prefixLogger(p *clientImpl) *internalgrpclog.PrefixLogger { +func prefixLogger(p *clusterResolverBalancer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) } diff --git a/xds/internal/balancer/clusterresolver/resource_resolver.go b/xds/internal/balancer/clusterresolver/resource_resolver.go new file mode 100644 index 000000000000..0aa47913d693 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -0,0 +1,287 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "sync" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// resourceUpdate is a combined update from all the resources, in the order of +// priority. For example, it can be {EDS, EDS, DNS}. +type resourceUpdate struct { + priorities []priorityConfig + err error +} + +// topLevelResolver is used by concrete endpointsResolver implementations for +// reporting updates and errors. The `resourceResolver` type implements this +// interface and takes appropriate actions upon receipt of updates and errors +// from underlying concrete resolvers. +type topLevelResolver interface { + onUpdate() + onError(error) +} + +// endpointsResolver wraps the functionality to resolve a given resource name to +// a set of endpoints. The mechanism used by concrete implementations depend on +// the supported discovery mechanism type. +type endpointsResolver interface { + // lastUpdate returns endpoint results from the most recent resolution. + // + // The type of the first return result is dependent on the resolver + // implementation. + // + // The second return result indicates whether the resolver was able to + // successfully resolve the resource name to endpoints. If set to false, the + // first return result is invalid and must not be used. 
+	lastUpdate() (interface{}, bool)
+
+	// resolveNow triggers re-resolution of the resource.
+	resolveNow()
+
+	// stop stops resolution of the resource. Implementations must not invoke
+	// any methods on the topLevelResolver interface once `stop()` returns.
+	stop()
+}
+
+// discoveryMechanismKey is {type+resource_name}, it's used as the map key, so
+// that the same resource resolver can be reused (e.g. when there are two
+// mechanisms, both for the same EDS resource, but have different circuit
+// breaking config).
+type discoveryMechanismKey struct {
+	typ  DiscoveryMechanismType
+	name string
+}
+
+// discoveryMechanismAndResolver is needed to keep the resolver and the
+// discovery mechanism together, because resolvers can be shared. And we need
+// the mechanism for fields like circuit breaking, LRS etc when generating the
+// balancer config.
+type discoveryMechanismAndResolver struct {
+	dm DiscoveryMechanism
+	r  endpointsResolver
+
+	childNameGen *nameGenerator
+}
+
+type resourceResolver struct {
+	parent        *clusterResolverBalancer
+	logger        *grpclog.PrefixLogger
+	updateChannel chan *resourceUpdate
+
+	// mu protects the slice and map, and content of the resolvers in the slice.
+	mu         sync.Mutex
+	mechanisms []DiscoveryMechanism
+	children   []discoveryMechanismAndResolver
+	// childrenMap's value only needs the resolver implementation (type
+	// discoveryMechanism) and the childNameGen. The other two fields are not
+	// used.
+	//
+	// TODO(cleanup): maybe we can make a new type with just the necessary
+	// fields, and use it here instead.
+	childrenMap map[discoveryMechanismKey]discoveryMechanismAndResolver
+	// Each new discovery mechanism needs a child name generator to reuse child
+	// policy names. But to make sure the names across discovery mechanisms
+	// don't conflict, we need a seq ID. This ID is incremented for each new
+	// discovery mechanism.
+ childNameGeneratorSeqID uint64 +} + +func newResourceResolver(parent *clusterResolverBalancer, logger *grpclog.PrefixLogger) *resourceResolver { + return &resourceResolver{ + parent: parent, + logger: logger, + updateChannel: make(chan *resourceUpdate, 1), + childrenMap: make(map[discoveryMechanismKey]discoveryMechanismAndResolver), + } +} + +func equalDiscoveryMechanisms(a, b []DiscoveryMechanism) bool { + if len(a) != len(b) { + return false + } + for i, aa := range a { + bb := b[i] + if !aa.Equal(bb) { + return false + } + } + return true +} + +func discoveryMechanismToKey(dm DiscoveryMechanism) discoveryMechanismKey { + switch dm.Type { + case DiscoveryMechanismTypeEDS: + nameToWatch := dm.EDSServiceName + if nameToWatch == "" { + nameToWatch = dm.Cluster + } + return discoveryMechanismKey{typ: dm.Type, name: nameToWatch} + case DiscoveryMechanismTypeLogicalDNS: + return discoveryMechanismKey{typ: dm.Type, name: dm.DNSHostname} + default: + return discoveryMechanismKey{} + } +} + +func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { + rr.mu.Lock() + defer rr.mu.Unlock() + if equalDiscoveryMechanisms(rr.mechanisms, mechanisms) { + return + } + rr.mechanisms = mechanisms + rr.children = make([]discoveryMechanismAndResolver, len(mechanisms)) + newDMs := make(map[discoveryMechanismKey]bool) + + // Start one watch for each new discover mechanism {type+resource_name}. + for i, dm := range mechanisms { + dmKey := discoveryMechanismToKey(dm) + newDMs[dmKey] = true + dmAndResolver, ok := rr.childrenMap[dmKey] + if ok { + // If this is not new, keep the fields (especially childNameGen), + // and only update the DiscoveryMechanism. + // + // Note that the same dmKey doesn't mean the same + // DiscoveryMechanism. There are fields (e.g. + // MaxConcurrentRequests) in DiscoveryMechanism that are not copied + // to dmKey, we need to keep those updated. 
+ dmAndResolver.dm = dm + rr.children[i] = dmAndResolver + continue + } + + // Create resolver for a newly seen resource. + var resolver endpointsResolver + switch dm.Type { + case DiscoveryMechanismTypeEDS: + resolver = newEDSResolver(dmKey.name, rr.parent.xdsClient, rr, rr.logger) + case DiscoveryMechanismTypeLogicalDNS: + resolver = newDNSResolver(dmKey.name, rr, rr.logger) + } + dmAndResolver = discoveryMechanismAndResolver{ + dm: dm, + r: resolver, + childNameGen: newNameGenerator(rr.childNameGeneratorSeqID), + } + rr.childrenMap[dmKey] = dmAndResolver + rr.children[i] = dmAndResolver + rr.childNameGeneratorSeqID++ + } + + // Stop the resources that were removed. + for dm, r := range rr.childrenMap { + if !newDMs[dm] { + delete(rr.childrenMap, dm) + r.r.stop() + } + } + // Regenerate even if there's no change in discovery mechanism, in case + // priority order changed. + rr.generateLocked() +} + +// resolveNow is typically called to trigger re-resolve of DNS. The EDS +// resolveNow() is a noop. +func (rr *resourceResolver) resolveNow() { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, r := range rr.childrenMap { + r.r.resolveNow() + } +} + +func (rr *resourceResolver) stop() { + rr.mu.Lock() + // Save the previous childrenMap to stop the children outside the mutex, + // and reinitialize the map. We only need to reinitialize to allow for the + // policy to be reused if the resource comes back. In practice, this does + // not happen as the parent LB policy will also be closed, causing this to + // be removed entirely, but a future use case might want to reuse the + // policy instead. + cm := rr.childrenMap + rr.childrenMap = make(map[discoveryMechanismKey]discoveryMechanismAndResolver) + rr.mechanisms = nil + rr.children = nil + rr.mu.Unlock() + + for _, r := range cm { + r.r.stop() + } + + // stop() is called when the LB policy is closed or when the underlying + // cluster resource is removed by the management server. 
In the latter case, + // an empty config update needs to be pushed to the child policy to ensure + // that a picker that fails RPCs is sent up to the channel. + // + // Resource resolver implementations are expected to not send any updates + // after they are stopped. Therefore, we don't have to worry about another + // write to this channel happening at the same time as this one. + select { + case <-rr.updateChannel: + default: + } + rr.updateChannel <- &resourceUpdate{} +} + +// generateLocked collects updates from all resolvers. It pushes the combined +// result on the update channel if all child resolvers have received at least +// one update. Otherwise it returns early. +// +// caller must hold rr.mu. +func (rr *resourceResolver) generateLocked() { + var ret []priorityConfig + for _, rDM := range rr.children { + u, ok := rDM.r.lastUpdate() + if !ok { + // Don't send updates to parent until all resolvers have update to + // send. + return + } + switch uu := u.(type) { + case xdsresource.EndpointsUpdate: + ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu, childNameGen: rDM.childNameGen}) + case []string: + ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu, childNameGen: rDM.childNameGen}) + } + } + select { + case <-rr.updateChannel: + default: + } + rr.updateChannel <- &resourceUpdate{priorities: ret} +} + +func (rr *resourceResolver) onUpdate() { + rr.mu.Lock() + rr.generateLocked() + rr.mu.Unlock() +} + +func (rr *resourceResolver) onError(err error) { + select { + case <-rr.updateChannel: + default: + } + rr.updateChannel <- &resourceUpdate{err: err} +} diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go new file mode 100644 index 000000000000..0da74f628db5 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -0,0 +1,158 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "fmt" + "net/url" + "sync" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + newDNS = func(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + // The dns resolver is registered by the grpc package. So, this call to + // resolver.Get() is never expected to return nil. + return resolver.Get("dns").Build(target, cc, opts) + } +) + +// dnsDiscoveryMechanism watches updates for the given DNS hostname. +// +// It implements resolver.ClientConn interface to work with the DNS resolver. +type dnsDiscoveryMechanism struct { + target string + topLevelResolver topLevelResolver + dnsR resolver.Resolver + logger *grpclog.PrefixLogger + + mu sync.Mutex + addrs []string + updateReceived bool +} + +// newDNSResolver creates an endpoints resolver which uses a DNS resolver under +// the hood. +// +// An error in parsing the provided target string or an error in creating a DNS +// resolver means that we will never be able to resolve the provided target +// strings to endpoints. The topLevelResolver propagates address updates to the +// clusterresolver LB policy **only** after it receives updates from all its +// child resolvers. 
Therefore, an error here means that the topLevelResolver
+// will never send address updates to the clusterresolver LB policy.
+//
+// Calling the onError() callback will ensure that this error is
+// propagated to the child policy which eventually moves the channel to
+// transient failure.
+//
+// The `dnsR` field is unset if we run into errors in this function. Therefore, a
+// nil check is required wherever we access that field.
+func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *dnsDiscoveryMechanism {
+	ret := &dnsDiscoveryMechanism{
+		target:           target,
+		topLevelResolver: topLevelResolver,
+		logger:           logger,
+	}
+	u, err := url.Parse("dns:///" + target)
+	if err != nil {
+		topLevelResolver.onError(fmt.Errorf("failed to parse dns hostname %q in clusterresolver LB policy", target))
+		return ret
+	}
+
+	r, err := newDNS(resolver.Target{URL: *u}, ret, resolver.BuildOptions{})
+	if err != nil {
+		topLevelResolver.onError(fmt.Errorf("failed to build DNS resolver for target %q: %v", target, err))
+		return ret
+	}
+	ret.dnsR = r
+	return ret
+}
+
+func (dr *dnsDiscoveryMechanism) lastUpdate() (interface{}, bool) {
+	dr.mu.Lock()
+	defer dr.mu.Unlock()
+
+	if !dr.updateReceived {
+		return nil, false
+	}
+	return dr.addrs, true
+}
+
+func (dr *dnsDiscoveryMechanism) resolveNow() {
+	if dr.dnsR != nil {
+		dr.dnsR.ResolveNow(resolver.ResolveNowOptions{})
+	}
+}
+
+// The definition of stop() mentions that implementations must not invoke any
+// methods on the topLevelResolver once the call to `stop()` returns. The
+// underlying dns resolver does not send any updates to the resolver.ClientConn
+// interface passed to it (implemented by dnsDiscoveryMechanism in this case)
+// after its `Close()` returns. Therefore, we can guarantee that no methods of
+// the topLevelResolver are invoked after we return from this method.
+func (dr *dnsDiscoveryMechanism) stop() { + if dr.dnsR != nil { + dr.dnsR.Close() + } +} + +// dnsDiscoveryMechanism needs to implement resolver.ClientConn interface to receive +// updates from the real DNS resolver. + +func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { + if dr.logger.V(2) { + dr.logger.Infof("DNS discovery mechanism for resource %q reported an update: %s", dr.target, pretty.ToJSON(state)) + } + + dr.mu.Lock() + addrs := make([]string, len(state.Addresses)) + for i, a := range state.Addresses { + addrs[i] = a.Addr + } + dr.addrs = addrs + dr.updateReceived = true + dr.mu.Unlock() + + dr.topLevelResolver.onUpdate() + return nil +} + +func (dr *dnsDiscoveryMechanism) ReportError(err error) { + if dr.logger.V(2) { + dr.logger.Infof("DNS discovery mechanism for resource %q reported error: %v", dr.target, err) + } + + dr.topLevelResolver.onError(err) +} + +func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { + dr.UpdateState(resolver.State{Addresses: addresses}) +} + +func (dr *dnsDiscoveryMechanism) NewServiceConfig(string) { + // This method is deprecated, and service config isn't supported. +} + +func (dr *dnsDiscoveryMechanism) ParseServiceConfig(string) *serviceconfig.ParseResult { + return &serviceconfig.ParseResult{Err: fmt.Errorf("service config not supported")} +} diff --git a/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/xds/internal/balancer/clusterresolver/resource_resolver_eds.go new file mode 100644 index 000000000000..86af73cbae21 --- /dev/null +++ b/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -0,0 +1,140 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package clusterresolver + +import ( + "sync" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +type edsDiscoveryMechanism struct { + nameToWatch string + cancelWatch func() + topLevelResolver topLevelResolver + stopped *grpcsync.Event + logger *grpclog.PrefixLogger + + mu sync.Mutex + update *xdsresource.EndpointsUpdate // Nil indicates no update received so far. +} + +func (er *edsDiscoveryMechanism) lastUpdate() (interface{}, bool) { + er.mu.Lock() + defer er.mu.Unlock() + + if er.update == nil { + return nil, false + } + return *er.update, true +} + +func (er *edsDiscoveryMechanism) resolveNow() { +} + +// The definition of stop() mentions that implementations must not invoke any +// methods on the topLevelResolver once the call to `stop()` returns. +func (er *edsDiscoveryMechanism) stop() { + // Canceling a watch with the xDS client can race with an xDS response + // received around the same time, and can result in the watch callback being + // invoked after the watch is canceled. Callers need to handle this race, + // and we fire the stopped event here to ensure that a watch callback + // invocation around the same time becomes a no-op. + er.stopped.Fire() + er.cancelWatch() +} + +// newEDSResolver returns an implementation of the endpointsResolver interface +// that uses EDS to resolve the given name to endpoints. 
+func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelResolver topLevelResolver, logger *grpclog.PrefixLogger) *edsDiscoveryMechanism { + ret := &edsDiscoveryMechanism{ + nameToWatch: nameToWatch, + topLevelResolver: topLevelResolver, + logger: logger, + stopped: grpcsync.NewEvent(), + } + ret.cancelWatch = xdsresource.WatchEndpoints(producer, nameToWatch, ret) + return ret +} + +// OnUpdate is invoked to report an update for the resource being watched. +func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData) { + if er.stopped.HasFired() { + return + } + + er.mu.Lock() + er.update = &update.Resource + er.mu.Unlock() + + er.topLevelResolver.onUpdate() +} + +func (er *edsDiscoveryMechanism) OnError(err error) { + if er.stopped.HasFired() { + return + } + + if er.logger.V(2) { + er.logger.Infof("EDS discovery mechanism for resource %q reported error: %v", er.nameToWatch, err) + } + + er.mu.Lock() + if er.update != nil { + // Continue using a previously received good configuration if one + // exists. + er.mu.Unlock() + return + } + + // Else report an empty update that would result in no priority child being + // created for this discovery mechanism. This would result in the priority + // LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or + // localities) if this was the only discovery mechanism, or would result in + // the priority LB policy using a lower priority discovery mechanism when + // that becomes available. + er.update = &xdsresource.EndpointsUpdate{} + er.mu.Unlock() + + er.topLevelResolver.onUpdate() +} + +func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() { + if er.stopped.HasFired() { + return + } + + if er.logger.V(2) { + er.logger.Infof("EDS discovery mechanism for resource %q reported resource-does-not-exist error", er.nameToWatch) + } + + // Report an empty update that would result in no priority child being + // created for this discovery mechanism. 
This would result in the priority + // LB policy reporting TRANSIENT_FAILURE (as there would be no priorities or + // localities) if this was the only discovery mechanism, or would result in + // the priority LB policy using a lower priority discovery mechanism when + // that becomes available. + er.mu.Lock() + er.update = &xdsresource.EndpointsUpdate{} + er.mu.Unlock() + + er.topLevelResolver.onUpdate() +} diff --git a/xds/internal/balancer/edsbalancer/config.go b/xds/internal/balancer/edsbalancer/config.go deleted file mode 100644 index 11c1338c81f7..000000000000 --- a/xds/internal/balancer/edsbalancer/config.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "encoding/json" - "fmt" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/serviceconfig" -) - -// EDSConfig represents the loadBalancingConfig section of the service config -// for EDS balancers. -type EDSConfig struct { - serviceconfig.LoadBalancingConfig - // ChildPolicy represents the load balancing config for the child - // policy. - ChildPolicy *loadBalancingConfig - // FallBackPolicy represents the load balancing config for the - // fallback. - FallBackPolicy *loadBalancingConfig - // Name to use in EDS query. If not present, defaults to the server - // name from the target URI. 
- EDSServiceName string - // MaxConcurrentRequests is the max number of concurrent request allowed for - // this service. If unset, default value 1024 is used. - // - // Note that this is not defined in the service config proto. And the reason - // is, we are dropping EDS and moving the features into cluster_impl. But in - // the mean time, to keep things working, we need to add this field. And it - // should be fine to add this extra field here, because EDS is only used in - // CDS today, so we have full control. - MaxConcurrentRequests *uint32 - // LRS server to send load reports to. If not present, load reporting - // will be disabled. If set to the empty string, load reporting will - // be sent to the same server that we obtained CDS data from. - LrsLoadReportingServerName *string -} - -// edsConfigJSON is the intermediate unmarshal result of EDSConfig. ChildPolicy -// and Fallbackspolicy are post-processed, and for each, the first installed -// policy is kept. -type edsConfigJSON struct { - ChildPolicy []*loadBalancingConfig - FallbackPolicy []*loadBalancingConfig - EDSServiceName string - MaxConcurrentRequests *uint32 - LRSLoadReportingServerName *string -} - -// UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. -// When unmarshalling, we iterate through the childPolicy/fallbackPolicy lists -// and select the first LB policy which has been registered. 
-func (l *EDSConfig) UnmarshalJSON(data []byte) error { - var configJSON edsConfigJSON - if err := json.Unmarshal(data, &configJSON); err != nil { - return err - } - - l.EDSServiceName = configJSON.EDSServiceName - l.MaxConcurrentRequests = configJSON.MaxConcurrentRequests - l.LrsLoadReportingServerName = configJSON.LRSLoadReportingServerName - - for _, lbcfg := range configJSON.ChildPolicy { - if balancer.Get(lbcfg.Name) != nil { - l.ChildPolicy = lbcfg - break - } - } - - for _, lbcfg := range configJSON.FallbackPolicy { - if balancer.Get(lbcfg.Name) != nil { - l.FallBackPolicy = lbcfg - break - } - } - return nil -} - -// MarshalJSON returns a JSON encoding of l. -func (l *EDSConfig) MarshalJSON() ([]byte, error) { - return nil, fmt.Errorf("EDSConfig.MarshalJSON() is unimplemented") -} - -// loadBalancingConfig represents a single load balancing config, -// stored in JSON format. -type loadBalancingConfig struct { - Name string - Config json.RawMessage -} - -// MarshalJSON returns a JSON encoding of l. -func (l *loadBalancingConfig) MarshalJSON() ([]byte, error) { - return nil, fmt.Errorf("loadBalancingConfig.MarshalJSON() is unimplemented") -} - -// UnmarshalJSON parses the JSON-encoded byte slice in data and stores it in l. -func (l *loadBalancingConfig) UnmarshalJSON(data []byte) error { - var cfg map[string]json.RawMessage - if err := json.Unmarshal(data, &cfg); err != nil { - return err - } - for name, config := range cfg { - l.Name = name - l.Config = config - } - return nil -} diff --git a/xds/internal/balancer/edsbalancer/eds.go b/xds/internal/balancer/edsbalancer/eds.go deleted file mode 100644 index 423df7aed95e..000000000000 --- a/xds/internal/balancer/edsbalancer/eds.go +++ /dev/null @@ -1,392 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package edsbalancer contains EDS balancer implementation. -package edsbalancer - -import ( - "encoding/json" - "fmt" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/serviceconfig" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" -) - -const edsName = "eds_experimental" - -// xdsClientInterface contains only the xds_client methods needed by EDS -// balancer. It's defined so we can override xdsclient.New function in tests. 
-type xdsClientInterface interface { - WatchEndpoints(clusterName string, edsCb func(xdsclient.EndpointsUpdate, error)) (cancel func()) - ReportLoad(server string) (loadStore *load.Store, cancel func()) - Close() -} - -var ( - newEDSBalancer = func(cc balancer.ClientConn, opts balancer.BuildOptions, enqueueState func(priorityType, balancer.State), lw load.PerClusterReporter, logger *grpclog.PrefixLogger) edsBalancerImplInterface { - return newEDSBalancerImpl(cc, opts, enqueueState, lw, logger) - } - newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } -) - -func init() { - balancer.Register(&edsBalancerBuilder{}) -} - -type edsBalancerBuilder struct{} - -// Build helps implement the balancer.Builder interface. -func (b *edsBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - x := &edsBalancer{ - cc: cc, - closed: grpcsync.NewEvent(), - grpcUpdate: make(chan interface{}), - xdsClientUpdate: make(chan *edsUpdate), - childPolicyUpdate: buffer.NewUnbounded(), - lsw: &loadStoreWrapper{}, - config: &EDSConfig{}, - } - x.logger = prefixLogger(x) - - client, err := newXDSClient() - if err != nil { - x.logger.Errorf("xds: failed to create xds-client: %v", err) - return nil - } - - x.xdsClient = client - x.edsImpl = newEDSBalancer(x.cc, opts, x.enqueueChildBalancerState, x.lsw, x.logger) - x.logger.Infof("Created") - go x.run() - return x -} - -func (b *edsBalancerBuilder) Name() string { - return edsName -} - -func (b *edsBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - var cfg EDSConfig - if err := json.Unmarshal(c, &cfg); err != nil { - return nil, fmt.Errorf("unable to unmarshal balancer config %s into EDSConfig, error: %v", string(c), err) - } - return &cfg, nil -} - -// edsBalancerImplInterface defines the interface that edsBalancerImpl must -// implement to communicate with edsBalancer. 
-// -// It's implemented by the real eds balancer and a fake testing eds balancer. -type edsBalancerImplInterface interface { - // handleEDSResponse passes the received EDS message from traffic director - // to eds balancer. - handleEDSResponse(edsResp xdsclient.EndpointsUpdate) - // handleChildPolicy updates the eds balancer the intra-cluster load - // balancing policy to use. - handleChildPolicy(name string, config json.RawMessage) - // handleSubConnStateChange handles state change for SubConn. - handleSubConnStateChange(sc balancer.SubConn, state connectivity.State) - // updateState handle a balancer state update from the priority. - updateState(priority priorityType, s balancer.State) - // updateServiceRequestsConfig updates the service requests counter to the - // one for the given service name. - updateServiceRequestsConfig(serviceName string, max *uint32) - // close closes the eds balancer. - close() -} - -// edsBalancer manages xdsClient and the actual EDS balancer implementation that -// does load balancing. -// -// It currently has only an edsBalancer. Later, we may add fallback. -type edsBalancer struct { - cc balancer.ClientConn - closed *grpcsync.Event - logger *grpclog.PrefixLogger - - // edsBalancer continuously monitors the channels below, and will handle - // events from them in sync. - grpcUpdate chan interface{} - xdsClientUpdate chan *edsUpdate - childPolicyUpdate *buffer.Unbounded - - xdsClient xdsClientInterface - lsw *loadStoreWrapper - config *EDSConfig // may change when passed a different service config - edsImpl edsBalancerImplInterface - - // edsServiceName is the edsServiceName currently being watched, not - // necessary the edsServiceName from service config. - edsServiceName string - cancelEndpointsWatch func() - loadReportServer *string // LRS is disabled if loadReporterServer is nil. - cancelLoadReport func() -} - -// run gets executed in a goroutine once edsBalancer is created. 
It monitors -// updates from grpc, xdsClient and load balancer. It synchronizes the -// operations that happen inside edsBalancer. It exits when edsBalancer is -// closed. -func (x *edsBalancer) run() { - for { - select { - case update := <-x.grpcUpdate: - x.handleGRPCUpdate(update) - case update := <-x.xdsClientUpdate: - x.handleXDSClientUpdate(update) - case update := <-x.childPolicyUpdate.Get(): - x.childPolicyUpdate.Load() - u := update.(*balancerStateWithPriority) - x.edsImpl.updateState(u.priority, u.s) - case <-x.closed.Done(): - x.cancelWatch() - x.xdsClient.Close() - x.edsImpl.close() - return - } - } -} - -// handleErrorFromUpdate handles both the error from parent ClientConn (from CDS -// balancer) and the error from xds client (from the watcher). fromParent is -// true if error is from parent ClientConn. -// -// If the error is connection error, it should be handled for fallback purposes. -// -// If the error is resource-not-found: -// - If it's from CDS balancer (shows as a resolver error), it means LDS or CDS -// resources were removed. The EDS watch should be canceled. -// - If it's from xds client, it means EDS resource were removed. The EDS -// watcher should keep watching. -// In both cases, the sub-balancers will be closed, and the future picks will -// fail. -func (x *edsBalancer) handleErrorFromUpdate(err error, fromParent bool) { - x.logger.Warningf("Received error: %v", err) - if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound { - if fromParent { - // This is an error from the parent ClientConn (can be the parent - // CDS balancer), and is a resource-not-found error. This means the - // resource (can be either LDS or CDS) was removed. Stop the EDS - // watch. 
- x.cancelWatch() - } - x.edsImpl.handleEDSResponse(xdsclient.EndpointsUpdate{}) - } -} - -func (x *edsBalancer) handleGRPCUpdate(update interface{}) { - switch u := update.(type) { - case *subConnStateUpdate: - x.edsImpl.handleSubConnStateChange(u.sc, u.state.ConnectivityState) - case *balancer.ClientConnState: - x.logger.Infof("Receive update from resolver, balancer config: %+v", u.BalancerConfig) - cfg, _ := u.BalancerConfig.(*EDSConfig) - if cfg == nil { - // service config parsing failed. should never happen. - return - } - - if err := x.handleServiceConfigUpdate(cfg); err != nil { - x.logger.Warningf("failed to update xDS client: %v", err) - } - - x.edsImpl.updateServiceRequestsConfig(cfg.EDSServiceName, cfg.MaxConcurrentRequests) - - // We will update the edsImpl with the new child policy, if we got a - // different one. - if !cmp.Equal(cfg.ChildPolicy, x.config.ChildPolicy, cmpopts.EquateEmpty()) { - if cfg.ChildPolicy != nil { - x.edsImpl.handleChildPolicy(cfg.ChildPolicy.Name, cfg.ChildPolicy.Config) - } else { - x.edsImpl.handleChildPolicy(roundrobin.Name, nil) - } - } - x.config = cfg - case error: - x.handleErrorFromUpdate(u, true) - default: - // unreachable path - x.logger.Errorf("wrong update type: %T", update) - } -} - -// handleServiceConfigUpdate applies the service config update, watching a new -// EDS service name and restarting LRS stream, as required. -func (x *edsBalancer) handleServiceConfigUpdate(config *EDSConfig) error { - // Restart EDS watch when the edsServiceName has changed. - if x.edsServiceName != config.EDSServiceName { - x.edsServiceName = config.EDSServiceName - x.startEndpointsWatch() - // TODO: this update for the LRS service name is too early. It should - // only apply to the new EDS response. But this is applied to the RPCs - // before the new EDS response. To fully fix this, the EDS balancer - // needs to do a graceful switch to another EDS implementation. 
- // - // This is OK for now, because we don't actually expect edsServiceName - // to change. Fix this (a bigger change) will happen later. - x.lsw.updateServiceName(x.edsServiceName) - } - - // Restart load reporting when the loadReportServer name has changed. - if !equalStringPointers(x.loadReportServer, config.LrsLoadReportingServerName) { - loadStore := x.startLoadReport(config.LrsLoadReportingServerName) - x.lsw.updateLoadStore(loadStore) - } - - return nil -} - -// startEndpointsWatch starts the EDS watch. -// -// This usually means load report needs to be restarted, but this function does -// NOT do that. Caller needs to call startLoadReport separately. -func (x *edsBalancer) startEndpointsWatch() { - if x.cancelEndpointsWatch != nil { - x.cancelEndpointsWatch() - } - cancelEDSWatch := x.xdsClient.WatchEndpoints(x.edsServiceName, func(update xdsclient.EndpointsUpdate, err error) { - x.logger.Infof("Watch update from xds-client %p, content: %+v", x.xdsClient, update) - x.handleEDSUpdate(update, err) - }) - x.logger.Infof("Watch started on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) - x.cancelEndpointsWatch = func() { - cancelEDSWatch() - x.logger.Infof("Watch cancelled on resource name %v with xds-client %p", x.edsServiceName, x.xdsClient) - } -} - -func (x *edsBalancer) cancelWatch() { - x.loadReportServer = nil - if x.cancelLoadReport != nil { - x.cancelLoadReport() - } - x.edsServiceName = "" - if x.cancelEndpointsWatch != nil { - x.cancelEndpointsWatch() - } -} - -// startLoadReport starts load reporting. If there's already a load reporting in -// progress, it cancels that. -// -// Caller can cal this when the loadReportServer name changes, but -// edsServiceName doesn't (so we only need to restart load reporting, not EDS -// watch). 
-func (x *edsBalancer) startLoadReport(loadReportServer *string) *load.Store { - x.loadReportServer = loadReportServer - if x.cancelLoadReport != nil { - x.cancelLoadReport() - } - if loadReportServer == nil { - return nil - } - ls, cancel := x.xdsClient.ReportLoad(*loadReportServer) - x.cancelLoadReport = cancel - return ls -} - -func (x *edsBalancer) handleXDSClientUpdate(update *edsUpdate) { - if err := update.err; err != nil { - x.handleErrorFromUpdate(err, false) - return - } - x.edsImpl.handleEDSResponse(update.resp) -} - -type subConnStateUpdate struct { - sc balancer.SubConn - state balancer.SubConnState -} - -func (x *edsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - update := &subConnStateUpdate{ - sc: sc, - state: state, - } - select { - case x.grpcUpdate <- update: - case <-x.closed.Done(): - } -} - -func (x *edsBalancer) ResolverError(err error) { - select { - case x.grpcUpdate <- err: - case <-x.closed.Done(): - } -} - -func (x *edsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - select { - case x.grpcUpdate <- &s: - case <-x.closed.Done(): - } - return nil -} - -type edsUpdate struct { - resp xdsclient.EndpointsUpdate - err error -} - -func (x *edsBalancer) handleEDSUpdate(resp xdsclient.EndpointsUpdate, err error) { - select { - case x.xdsClientUpdate <- &edsUpdate{resp: resp, err: err}: - case <-x.closed.Done(): - } -} - -type balancerStateWithPriority struct { - priority priorityType - s balancer.State -} - -func (x *edsBalancer) enqueueChildBalancerState(p priorityType, s balancer.State) { - x.childPolicyUpdate.Put(&balancerStateWithPriority{ - priority: p, - s: s, - }) -} - -func (x *edsBalancer) Close() { - x.closed.Fire() - x.logger.Infof("Shutdown") -} - -// equalStringPointers returns true if -// - a and b are both nil OR -// - *a == *b (and a and b are both non-nil) -func equalStringPointers(a, b *string) bool { - if a == nil && b == nil { - return true - } - if a == nil || b == nil { 
- return false - } - return *a == *b -} diff --git a/xds/internal/balancer/edsbalancer/eds_impl.go b/xds/internal/balancer/edsbalancer/eds_impl.go deleted file mode 100644 index 5318a5342e83..000000000000 --- a/xds/internal/balancer/edsbalancer/eds_impl.go +++ /dev/null @@ -1,571 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "encoding/json" - "reflect" - "sync" - "time" - - "github.com/google/go-cmp/cmp" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/balancer/weightedroundrobin" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator" - "google.golang.org/grpc/xds/internal/client" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" -) - -// TODO: make this a environment variable? 
-var defaultPriorityInitTimeout = 10 * time.Second - -const defaultServiceRequestCountMax = 1024 - -type localityConfig struct { - weight uint32 - addrs []resolver.Address -} - -// balancerGroupWithConfig contains the localities with the same priority. It -// manages all localities using a balancerGroup. -type balancerGroupWithConfig struct { - bg *balancergroup.BalancerGroup - stateAggregator *weightedaggregator.Aggregator - configs map[internal.LocalityID]*localityConfig -} - -// edsBalancerImpl does load balancing based on the EDS responses. Note that it -// doesn't implement the balancer interface. It's intended to be used by a high -// level balancer implementation. -// -// The localities are picked as weighted round robin. A configurable child -// policy is used to manage endpoints in each locality. -type edsBalancerImpl struct { - cc balancer.ClientConn - buildOpts balancer.BuildOptions - logger *grpclog.PrefixLogger - loadReporter load.PerClusterReporter - - enqueueChildBalancerStateUpdate func(priorityType, balancer.State) - - subBalancerBuilder balancer.Builder - priorityToLocalities map[priorityType]*balancerGroupWithConfig - respReceived bool - - // There's no need to hold any mutexes at the same time. The order to take - // mutex should be: priorityMu > subConnMu, but this is implicit via - // balancers (starting balancer with next priority while holding priorityMu, - // and the balancer may create new SubConn). - - priorityMu sync.Mutex - // priorities are pointers, and will be nil when EDS returns empty result. - priorityInUse priorityType - priorityLowest priorityType - priorityToState map[priorityType]*balancer.State - // The timer to give a priority 10 seconds to connect. And if the priority - // doesn't go into Ready/Failure, start the next priority. - // - // One timer is enough because there can be at most one priority in init - // state. 
- priorityInitTimer *time.Timer - - subConnMu sync.Mutex - subConnToPriority map[balancer.SubConn]priorityType - - pickerMu sync.Mutex - dropConfig []xdsclient.OverloadDropConfig - drops []*dropper - innerState balancer.State // The state of the picker without drop support. - serviceRequestsCounter *client.ServiceRequestsCounter - serviceRequestCountMax uint32 -} - -// newEDSBalancerImpl create a new edsBalancerImpl. -func newEDSBalancerImpl(cc balancer.ClientConn, bOpts balancer.BuildOptions, enqueueState func(priorityType, balancer.State), lr load.PerClusterReporter, logger *grpclog.PrefixLogger) *edsBalancerImpl { - edsImpl := &edsBalancerImpl{ - cc: cc, - buildOpts: bOpts, - logger: logger, - subBalancerBuilder: balancer.Get(roundrobin.Name), - loadReporter: lr, - - enqueueChildBalancerStateUpdate: enqueueState, - - priorityToLocalities: make(map[priorityType]*balancerGroupWithConfig), - priorityToState: make(map[priorityType]*balancer.State), - subConnToPriority: make(map[balancer.SubConn]priorityType), - serviceRequestCountMax: defaultServiceRequestCountMax, - } - // Don't start balancer group here. Start it when handling the first EDS - // response. Otherwise the balancer group will be started with round-robin, - // and if users specify a different sub-balancer, all balancers in balancer - // group will be closed and recreated when sub-balancer update happens. - return edsImpl -} - -// handleChildPolicy updates the child balancers handling endpoints. Child -// policy is roundrobin by default. If the specified balancer is not installed, -// the old child balancer will be used. -// -// HandleChildPolicy and HandleEDSResponse must be called by the same goroutine. 
-func (edsImpl *edsBalancerImpl) handleChildPolicy(name string, config json.RawMessage) { - if edsImpl.subBalancerBuilder.Name() == name { - return - } - newSubBalancerBuilder := balancer.Get(name) - if newSubBalancerBuilder == nil { - edsImpl.logger.Infof("edsBalancerImpl: failed to find balancer with name %q, keep using %q", name, edsImpl.subBalancerBuilder.Name()) - return - } - edsImpl.subBalancerBuilder = newSubBalancerBuilder - for _, bgwc := range edsImpl.priorityToLocalities { - if bgwc == nil { - continue - } - for lid, config := range bgwc.configs { - lidJSON, err := lid.ToString() - if err != nil { - edsImpl.logger.Errorf("failed to marshal LocalityID: %#v, skipping this locality", lid) - continue - } - // TODO: (eds) add support to balancer group to support smoothly - // switching sub-balancers (keep old balancer around until new - // balancer becomes ready). - bgwc.bg.Remove(lidJSON) - bgwc.bg.Add(lidJSON, edsImpl.subBalancerBuilder) - bgwc.bg.UpdateClientConnState(lidJSON, balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: config.addrs}, - }) - // This doesn't need to manually update picker, because the new - // sub-balancer will send it's picker later. - } - } -} - -// updateDrops compares new drop policies with the old. If they are different, -// it updates the drop policies and send ClientConn an updated picker. -func (edsImpl *edsBalancerImpl) updateDrops(dropConfig []xdsclient.OverloadDropConfig) { - if cmp.Equal(dropConfig, edsImpl.dropConfig) { - return - } - edsImpl.pickerMu.Lock() - edsImpl.dropConfig = dropConfig - var newDrops []*dropper - for _, c := range edsImpl.dropConfig { - newDrops = append(newDrops, newDropper(c)) - } - edsImpl.drops = newDrops - if edsImpl.innerState.Picker != nil { - // Update picker with old inner picker, new drops. 
- edsImpl.cc.UpdateState(balancer.State{ - ConnectivityState: edsImpl.innerState.ConnectivityState, - Picker: newDropPicker(edsImpl.innerState.Picker, newDrops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)}, - ) - } - edsImpl.pickerMu.Unlock() -} - -// handleEDSResponse handles the EDS response and creates/deletes localities and -// SubConns. It also handles drops. -// -// HandleChildPolicy and HandleEDSResponse must be called by the same goroutine. -func (edsImpl *edsBalancerImpl) handleEDSResponse(edsResp xdsclient.EndpointsUpdate) { - // TODO: Unhandled fields from EDS response: - // - edsResp.GetPolicy().GetOverprovisioningFactor() - // - locality.GetPriority() - // - lbEndpoint.GetMetadata(): contains BNS name, send to sub-balancers - // - as service config or as resolved address - // - if socketAddress is not ip:port - // - socketAddress.GetNamedPort(), socketAddress.GetResolverName() - // - resolve endpoint's name with another resolver - - // If the first EDS update is an empty update, nothing is changing from the - // previous update (which is the default empty value). We need to explicitly - // handle first update being empty, and send a transient failure picker. - // - // TODO: define Equal() on type EndpointUpdate to avoid DeepEqual. And do - // the same for the other types. - if !edsImpl.respReceived && reflect.DeepEqual(edsResp, xdsclient.EndpointsUpdate{}) { - edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(errAllPrioritiesRemoved)}) - } - edsImpl.respReceived = true - - edsImpl.updateDrops(edsResp.Drops) - - // Filter out all localities with weight 0. - // - // Locality weighted load balancer can be enabled by setting an option in - // CDS, and the weight of each locality. Currently, without the guarantee - // that CDS is always sent, we assume locality weighted load balance is - // always enabled, and ignore all weight 0 localities. 
- // - // In the future, we should look at the config in CDS response and decide - // whether locality weight matters. - newLocalitiesWithPriority := make(map[priorityType][]xdsclient.Locality) - for _, locality := range edsResp.Localities { - if locality.Weight == 0 { - continue - } - priority := newPriorityType(locality.Priority) - newLocalitiesWithPriority[priority] = append(newLocalitiesWithPriority[priority], locality) - } - - var ( - priorityLowest priorityType - priorityChanged bool - ) - - for priority, newLocalities := range newLocalitiesWithPriority { - if !priorityLowest.isSet() || priorityLowest.higherThan(priority) { - priorityLowest = priority - } - - bgwc, ok := edsImpl.priorityToLocalities[priority] - if !ok { - // Create balancer group if it's never created (this is the first - // time this priority is received). We don't start it here. It may - // be started when necessary (e.g. when higher is down, or if it's a - // new lowest priority). - ccPriorityWrapper := edsImpl.ccWrapperWithPriority(priority) - stateAggregator := weightedaggregator.New(ccPriorityWrapper, edsImpl.logger, newRandomWRR) - bgwc = &balancerGroupWithConfig{ - bg: balancergroup.New(ccPriorityWrapper, edsImpl.buildOpts, stateAggregator, edsImpl.loadReporter, edsImpl.logger), - stateAggregator: stateAggregator, - configs: make(map[internal.LocalityID]*localityConfig), - } - edsImpl.priorityToLocalities[priority] = bgwc - priorityChanged = true - edsImpl.logger.Infof("New priority %v added", priority) - } - edsImpl.handleEDSResponsePerPriority(bgwc, newLocalities) - } - edsImpl.priorityLowest = priorityLowest - - // Delete priorities that are removed in the latest response, and also close - // the balancer group. 
- for p, bgwc := range edsImpl.priorityToLocalities { - if _, ok := newLocalitiesWithPriority[p]; !ok { - delete(edsImpl.priorityToLocalities, p) - bgwc.bg.Close() - delete(edsImpl.priorityToState, p) - priorityChanged = true - edsImpl.logger.Infof("Priority %v deleted", p) - } - } - - // If priority was added/removed, it may affect the balancer group to use. - // E.g. priorityInUse was removed, or all priorities are down, and a new - // lower priority was added. - if priorityChanged { - edsImpl.handlePriorityChange() - } -} - -func (edsImpl *edsBalancerImpl) handleEDSResponsePerPriority(bgwc *balancerGroupWithConfig, newLocalities []xdsclient.Locality) { - // newLocalitiesSet contains all names of localities in the new EDS response - // for the same priority. It's used to delete localities that are removed in - // the new EDS response. - newLocalitiesSet := make(map[internal.LocalityID]struct{}) - var rebuildStateAndPicker bool - for _, locality := range newLocalities { - // One balancer for each locality. - - lid := locality.ID - lidJSON, err := lid.ToString() - if err != nil { - edsImpl.logger.Errorf("failed to marshal LocalityID: %#v, skipping this locality", lid) - continue - } - newLocalitiesSet[lid] = struct{}{} - - newWeight := locality.Weight - var newAddrs []resolver.Address - for _, lbEndpoint := range locality.Endpoints { - // Filter out all "unhealthy" endpoints (unknown and - // healthy are both considered to be healthy: - // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus). 
- if lbEndpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && - lbEndpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown { - continue - } - - address := resolver.Address{ - Addr: lbEndpoint.Address, - } - if edsImpl.subBalancerBuilder.Name() == weightedroundrobin.Name && lbEndpoint.Weight != 0 { - ai := weightedroundrobin.AddrInfo{Weight: lbEndpoint.Weight} - address = weightedroundrobin.SetAddrInfo(address, ai) - // Metadata field in resolver.Address is deprecated. The - // attributes field should be used to specify arbitrary - // attributes about the address. We still need to populate the - // Metadata field here to allow users of this field to migrate - // to the new one. - // TODO(easwars): Remove this once all users have migrated. - // See https://github.com/grpc/grpc-go/issues/3563. - address.Metadata = &ai - } - newAddrs = append(newAddrs, address) - } - var weightChanged, addrsChanged bool - config, ok := bgwc.configs[lid] - if !ok { - // A new balancer, add it to balancer group and balancer map. - bgwc.stateAggregator.Add(lidJSON, newWeight) - bgwc.bg.Add(lidJSON, edsImpl.subBalancerBuilder) - config = &localityConfig{ - weight: newWeight, - } - bgwc.configs[lid] = config - - // weightChanged is false for new locality, because there's no need - // to update weight in bg. - addrsChanged = true - edsImpl.logger.Infof("New locality %v added", lid) - } else { - // Compare weight and addrs. 
- if config.weight != newWeight { - weightChanged = true - } - if !cmp.Equal(config.addrs, newAddrs) { - addrsChanged = true - } - edsImpl.logger.Infof("Locality %v updated, weightedChanged: %v, addrsChanged: %v", lid, weightChanged, addrsChanged) - } - - if weightChanged { - config.weight = newWeight - bgwc.stateAggregator.UpdateWeight(lidJSON, newWeight) - rebuildStateAndPicker = true - } - - if addrsChanged { - config.addrs = newAddrs - bgwc.bg.UpdateClientConnState(lidJSON, balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: newAddrs}, - }) - } - } - - // Delete localities that are removed in the latest response. - for lid := range bgwc.configs { - lidJSON, err := lid.ToString() - if err != nil { - edsImpl.logger.Errorf("failed to marshal LocalityID: %#v, skipping this locality", lid) - continue - } - if _, ok := newLocalitiesSet[lid]; !ok { - bgwc.stateAggregator.Remove(lidJSON) - bgwc.bg.Remove(lidJSON) - delete(bgwc.configs, lid) - edsImpl.logger.Infof("Locality %v deleted", lid) - rebuildStateAndPicker = true - } - } - - if rebuildStateAndPicker { - bgwc.stateAggregator.BuildAndUpdate() - } -} - -// handleSubConnStateChange handles the state change and update pickers accordingly. -func (edsImpl *edsBalancerImpl) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - edsImpl.subConnMu.Lock() - var bgwc *balancerGroupWithConfig - if p, ok := edsImpl.subConnToPriority[sc]; ok { - if s == connectivity.Shutdown { - // Only delete sc from the map when state changed to Shutdown. - delete(edsImpl.subConnToPriority, sc) - } - bgwc = edsImpl.priorityToLocalities[p] - } - edsImpl.subConnMu.Unlock() - if bgwc == nil { - edsImpl.logger.Infof("edsBalancerImpl: priority not found for sc state change") - return - } - if bg := bgwc.bg; bg != nil { - bg.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) - } -} - -// updateServiceRequestsConfig handles changes to the circuit breaking configuration. 
-func (edsImpl *edsBalancerImpl) updateServiceRequestsConfig(serviceName string, max *uint32) { - if !env.CircuitBreakingSupport { - return - } - edsImpl.pickerMu.Lock() - var updatePicker bool - if edsImpl.serviceRequestsCounter == nil || edsImpl.serviceRequestsCounter.ServiceName != serviceName { - edsImpl.serviceRequestsCounter = client.GetServiceRequestsCounter(serviceName) - updatePicker = true - } - - var newMax uint32 = defaultServiceRequestCountMax - if max != nil { - newMax = *max - } - if edsImpl.serviceRequestCountMax != newMax { - edsImpl.serviceRequestCountMax = newMax - updatePicker = true - } - if updatePicker && edsImpl.innerState.Picker != nil { - // Update picker with old inner picker, new counter and counterMax. - edsImpl.cc.UpdateState(balancer.State{ - ConnectivityState: edsImpl.innerState.ConnectivityState, - Picker: newDropPicker(edsImpl.innerState.Picker, edsImpl.drops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)}, - ) - } - edsImpl.pickerMu.Unlock() -} - -// updateState first handles priority, and then wraps picker in a drop picker -// before forwarding the update. -func (edsImpl *edsBalancerImpl) updateState(priority priorityType, s balancer.State) { - _, ok := edsImpl.priorityToLocalities[priority] - if !ok { - edsImpl.logger.Infof("eds: received picker update from unknown priority") - return - } - - if edsImpl.handlePriorityWithNewState(priority, s) { - edsImpl.pickerMu.Lock() - defer edsImpl.pickerMu.Unlock() - edsImpl.innerState = s - // Don't reset drops when it's a state change. 
- edsImpl.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: newDropPicker(s.Picker, edsImpl.drops, edsImpl.loadReporter, edsImpl.serviceRequestsCounter, edsImpl.serviceRequestCountMax)}) - } -} - -func (edsImpl *edsBalancerImpl) ccWrapperWithPriority(priority priorityType) *edsBalancerWrapperCC { - return &edsBalancerWrapperCC{ - ClientConn: edsImpl.cc, - priority: priority, - parent: edsImpl, - } -} - -// edsBalancerWrapperCC implements the balancer.ClientConn API and get passed to -// each balancer group. It contains the locality priority. -type edsBalancerWrapperCC struct { - balancer.ClientConn - priority priorityType - parent *edsBalancerImpl -} - -func (ebwcc *edsBalancerWrapperCC) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - return ebwcc.parent.newSubConn(ebwcc.priority, addrs, opts) -} -func (ebwcc *edsBalancerWrapperCC) UpdateState(state balancer.State) { - ebwcc.parent.enqueueChildBalancerStateUpdate(ebwcc.priority, state) -} - -func (edsImpl *edsBalancerImpl) newSubConn(priority priorityType, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - sc, err := edsImpl.cc.NewSubConn(addrs, opts) - if err != nil { - return nil, err - } - edsImpl.subConnMu.Lock() - edsImpl.subConnToPriority[sc] = priority - edsImpl.subConnMu.Unlock() - return sc, nil -} - -// close closes the balancer. 
-func (edsImpl *edsBalancerImpl) close() { - for _, bgwc := range edsImpl.priorityToLocalities { - if bg := bgwc.bg; bg != nil { - bgwc.stateAggregator.Stop() - bg.Close() - } - } -} - -type dropPicker struct { - drops []*dropper - p balancer.Picker - loadStore load.PerClusterReporter - counter *client.ServiceRequestsCounter - countMax uint32 -} - -func newDropPicker(p balancer.Picker, drops []*dropper, loadStore load.PerClusterReporter, counter *client.ServiceRequestsCounter, countMax uint32) *dropPicker { - return &dropPicker{ - drops: drops, - p: p, - loadStore: loadStore, - counter: counter, - countMax: countMax, - } -} - -func (d *dropPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - var ( - drop bool - category string - ) - for _, dp := range d.drops { - if dp.drop() { - drop = true - category = dp.c.Category - break - } - } - if drop { - if d.loadStore != nil { - d.loadStore.CallDropped(category) - } - return balancer.PickResult{}, status.Errorf(codes.Unavailable, "RPC is dropped") - } - if d.counter != nil { - if err := d.counter.StartRequest(d.countMax); err != nil { - // Drops by circuit breaking are reported with empty category. They - // will be reported only in total drops, but not in per category. - if d.loadStore != nil { - d.loadStore.CallDropped("") - } - return balancer.PickResult{}, status.Errorf(codes.Unavailable, err.Error()) - } - pr, err := d.p.Pick(info) - if err != nil { - d.counter.EndRequest() - return pr, err - } - oldDone := pr.Done - pr.Done = func(doneInfo balancer.DoneInfo) { - d.counter.EndRequest() - if oldDone != nil { - oldDone(doneInfo) - } - } - return pr, err - } - // TODO: (eds) don't drop unless the inner picker is READY. Similar to - // https://github.com/grpc/grpc-go/issues/2622. 
- return d.p.Pick(info) -} diff --git a/xds/internal/balancer/edsbalancer/eds_impl_priority.go b/xds/internal/balancer/edsbalancer/eds_impl_priority.go deleted file mode 100644 index 53ac6ef5e873..000000000000 --- a/xds/internal/balancer/edsbalancer/eds_impl_priority.go +++ /dev/null @@ -1,358 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "errors" - "fmt" - "time" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" - "google.golang.org/grpc/connectivity" -) - -var errAllPrioritiesRemoved = errors.New("eds: no locality is provided, all priorities are removed") - -// handlePriorityChange handles priority after EDS adds/removes a -// priority. -// -// - If all priorities were deleted, unset priorityInUse, and set parent -// ClientConn to TransientFailure -// - If priorityInUse wasn't set, this is either the first EDS resp, or the -// previous EDS resp deleted everything. Set priorityInUse to 0, and start 0. -// - If priorityInUse was deleted, send the picker from the new lowest priority -// to parent ClientConn, and set priorityInUse to the new lowest. -// - If priorityInUse has a non-Ready state, and also there's a priority lower -// than priorityInUse (which means a lower priority was added), set the next -// priority as new priorityInUse, and start the bg. 
-func (edsImpl *edsBalancerImpl) handlePriorityChange() { - edsImpl.priorityMu.Lock() - defer edsImpl.priorityMu.Unlock() - - // Everything was removed by EDS. - if !edsImpl.priorityLowest.isSet() { - edsImpl.priorityInUse = newPriorityTypeUnset() - // Stop the init timer. This can happen if the only priority is removed - // shortly after it's added. - if timer := edsImpl.priorityInitTimer; timer != nil { - timer.Stop() - edsImpl.priorityInitTimer = nil - } - edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, Picker: base.NewErrPicker(errAllPrioritiesRemoved)}) - return - } - - // priorityInUse wasn't set, use 0. - if !edsImpl.priorityInUse.isSet() { - edsImpl.logger.Infof("Switching priority from unset to %v", 0) - edsImpl.startPriority(newPriorityType(0)) - return - } - - // priorityInUse was deleted, use the new lowest. - if _, ok := edsImpl.priorityToLocalities[edsImpl.priorityInUse]; !ok { - oldP := edsImpl.priorityInUse - edsImpl.priorityInUse = edsImpl.priorityLowest - edsImpl.logger.Infof("Switching priority from %v to %v, because former was deleted", oldP, edsImpl.priorityInUse) - if s, ok := edsImpl.priorityToState[edsImpl.priorityLowest]; ok { - edsImpl.cc.UpdateState(*s) - } else { - // If state for priorityLowest is not found, this means priorityLowest was - // started, but never sent any update. The init timer fired and - // triggered the next priority. The old_priorityInUse (that was just - // deleted EDS) was picked later. - // - // We don't have an old state to send to parent, but we also don't - // want parent to keep using picker from old_priorityInUse. Send an - // update to trigger block picks until a new picker is ready. - edsImpl.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable)}) - } - return - } - - // priorityInUse is not ready, look for next priority, and use if found. 
- if s, ok := edsImpl.priorityToState[edsImpl.priorityInUse]; ok && s.ConnectivityState != connectivity.Ready { - pNext := edsImpl.priorityInUse.nextLower() - if _, ok := edsImpl.priorityToLocalities[pNext]; ok { - edsImpl.logger.Infof("Switching priority from %v to %v, because latter was added, and former wasn't Ready") - edsImpl.startPriority(pNext) - } - } -} - -// startPriority sets priorityInUse to p, and starts the balancer group for p. -// It also starts a timer to fall to next priority after timeout. -// -// Caller must hold priorityMu, priority must exist, and edsImpl.priorityInUse -// must be non-nil. -func (edsImpl *edsBalancerImpl) startPriority(priority priorityType) { - edsImpl.priorityInUse = priority - p := edsImpl.priorityToLocalities[priority] - // NOTE: this will eventually send addresses to sub-balancers. If the - // sub-balancer tries to update picker, it will result in a deadlock on - // priorityMu in the update is handled synchronously. The deadlock is - // currently avoided by handling balancer update in a goroutine (the run - // goroutine in the parent eds balancer). When priority balancer is split - // into its own, this asynchronous state handling needs to be copied. - p.stateAggregator.Start() - p.bg.Start() - // startPriority can be called when - // 1. first EDS resp, start p0 - // 2. a high priority goes Failure, start next - // 3. a high priority init timeout, start next - // - // In all the cases, the existing init timer is either closed, also already - // expired. There's no need to close the old timer. 
- edsImpl.priorityInitTimer = time.AfterFunc(defaultPriorityInitTimeout, func() { - edsImpl.priorityMu.Lock() - defer edsImpl.priorityMu.Unlock() - if !edsImpl.priorityInUse.isSet() || !edsImpl.priorityInUse.equal(priority) { - return - } - edsImpl.priorityInitTimer = nil - pNext := priority.nextLower() - if _, ok := edsImpl.priorityToLocalities[pNext]; ok { - edsImpl.startPriority(pNext) - } - }) -} - -// handlePriorityWithNewState start/close priorities based on the connectivity -// state. It returns whether the state should be forwarded to parent ClientConn. -func (edsImpl *edsBalancerImpl) handlePriorityWithNewState(priority priorityType, s balancer.State) bool { - edsImpl.priorityMu.Lock() - defer edsImpl.priorityMu.Unlock() - - if !edsImpl.priorityInUse.isSet() { - edsImpl.logger.Infof("eds: received picker update when no priority is in use (EDS returned an empty list)") - return false - } - - if edsImpl.priorityInUse.higherThan(priority) { - // Lower priorities should all be closed, this is an unexpected update. - edsImpl.logger.Infof("eds: received picker update from priority lower then priorityInUse") - return false - } - - bState, ok := edsImpl.priorityToState[priority] - if !ok { - bState = &balancer.State{} - edsImpl.priorityToState[priority] = bState - } - oldState := bState.ConnectivityState - *bState = s - - switch s.ConnectivityState { - case connectivity.Ready: - return edsImpl.handlePriorityWithNewStateReady(priority) - case connectivity.TransientFailure: - return edsImpl.handlePriorityWithNewStateTransientFailure(priority) - case connectivity.Connecting: - return edsImpl.handlePriorityWithNewStateConnecting(priority, oldState) - default: - // New state is Idle, should never happen. Don't forward. - return false - } -} - -// handlePriorityWithNewStateReady handles state Ready and decides whether to -// forward update or not. 
-// -// An update with state Ready: -// - If it's from higher priority: -// - Forward the update -// - Set the priority as priorityInUse -// - Close all priorities lower than this one -// - If it's from priorityInUse: -// - Forward and do nothing else -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold priorityMu. -func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateReady(priority priorityType) bool { - // If one priority higher or equal to priorityInUse goes Ready, stop the - // init timer. If update is from higher than priorityInUse, - // priorityInUse will be closed, and the init timer will become useless. - if timer := edsImpl.priorityInitTimer; timer != nil { - timer.Stop() - edsImpl.priorityInitTimer = nil - } - - if edsImpl.priorityInUse.lowerThan(priority) { - edsImpl.logger.Infof("Switching priority from %v to %v, because latter became Ready", edsImpl.priorityInUse, priority) - edsImpl.priorityInUse = priority - for i := priority.nextLower(); !i.lowerThan(edsImpl.priorityLowest); i = i.nextLower() { - bgwc := edsImpl.priorityToLocalities[i] - bgwc.stateAggregator.Stop() - bgwc.bg.Close() - } - return true - } - return true -} - -// handlePriorityWithNewStateTransientFailure handles state TransientFailure and -// decides whether to forward update or not. -// -// An update with state Failure: -// - If it's from a higher priority: -// - Do not forward, and do nothing -// - If it's from priorityInUse: -// - If there's no lower: -// - Forward and do nothing else -// - If there's a lower priority: -// - Forward -// - Set lower as priorityInUse -// - Start lower -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold priorityMu. -func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateTransientFailure(priority priorityType) bool { - if edsImpl.priorityInUse.lowerThan(priority) { - return false - } - // priorityInUse sends a failure. Stop its init timer. 
- if timer := edsImpl.priorityInitTimer; timer != nil { - timer.Stop() - edsImpl.priorityInitTimer = nil - } - pNext := priority.nextLower() - if _, okNext := edsImpl.priorityToLocalities[pNext]; !okNext { - return true - } - edsImpl.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, pNext) - edsImpl.startPriority(pNext) - return true -} - -// handlePriorityWithNewStateConnecting handles state Connecting and decides -// whether to forward update or not. -// -// An update with state Connecting: -// - If it's from a higher priority -// - Do nothing -// - If it's from priorityInUse, the behavior depends on previous state. -// -// When new state is Connecting, the behavior depends on previous state. If the -// previous state was Ready, this is a transition out from Ready to Connecting. -// Assuming there are multiple backends in the same priority, this mean we are -// in a bad situation and we should failover to the next priority (Side note: -// the current connectivity state aggregating algorhtim (e.g. round-robin) is -// not handling this right, because if many backends all go from Ready to -// Connecting, the overall situation is more like TransientFailure, not -// Connecting). -// -// If the previous state was Idle, we don't do anything special with failure, -// and simply forward the update. The init timer should be in process, will -// handle failover if it timeouts. If the previous state was TransientFailure, -// we do not forward, because the lower priority is in use. -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold priorityMu. 
-func (edsImpl *edsBalancerImpl) handlePriorityWithNewStateConnecting(priority priorityType, oldState connectivity.State) bool { - if edsImpl.priorityInUse.lowerThan(priority) { - return false - } - - switch oldState { - case connectivity.Ready: - pNext := priority.nextLower() - if _, okNext := edsImpl.priorityToLocalities[pNext]; !okNext { - return true - } - edsImpl.logger.Infof("Switching priority from %v to %v, because former became Connecting from Ready", priority, pNext) - edsImpl.startPriority(pNext) - return true - case connectivity.Idle: - return true - case connectivity.TransientFailure: - return false - default: - // Old state is Connecting or Shutdown. Don't forward. - return false - } -} - -// priorityType represents the priority from EDS response. -// -// 0 is the highest priority. The bigger the number, the lower the priority. -type priorityType struct { - set bool - p uint32 -} - -func newPriorityType(p uint32) priorityType { - return priorityType{ - set: true, - p: p, - } -} - -func newPriorityTypeUnset() priorityType { - return priorityType{} -} - -func (p priorityType) isSet() bool { - return p.set -} - -func (p priorityType) equal(p2 priorityType) bool { - if !p.isSet() && !p2.isSet() { - return true - } - if !p.isSet() || !p2.isSet() { - return false - } - return p == p2 -} - -func (p priorityType) higherThan(p2 priorityType) bool { - if !p.isSet() || !p2.isSet() { - // TODO(menghanl): return an appropriate value instead of panic. - panic("priority unset") - } - return p.p < p2.p -} - -func (p priorityType) lowerThan(p2 priorityType) bool { - if !p.isSet() || !p2.isSet() { - // TODO(menghanl): return an appropriate value instead of panic. 
- panic("priority unset") - } - return p.p > p2.p -} - -func (p priorityType) nextLower() priorityType { - if !p.isSet() { - panic("priority unset") - } - return priorityType{ - set: true, - p: p.p + 1, - } -} - -func (p priorityType) String() string { - if !p.set { - return "Nil" - } - return fmt.Sprint(p.p) -} diff --git a/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go b/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go deleted file mode 100644 index 7696feb5bd04..000000000000 --- a/xds/internal/balancer/edsbalancer/eds_impl_priority_test.go +++ /dev/null @@ -1,843 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "context" - "testing" - "time" - - corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/xds/internal/testutils" -) - -// When a high priority is ready, adding/removing lower locality doesn't cause -// changes. -// -// Init 0 and 1; 0 is up, use 0; add 2, use 0; remove 2, use 0. 
-func (s) TestEDSPriority_HighPriorityReady(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // Two localities, with priorities [0, 1], each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - - // p0 is ready. - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - - // Test roundrobin with only p0 subconns. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Add p2, it shouldn't cause any udpates. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) - - select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") - case <-cc.NewSubConnCh: - t.Fatalf("got unexpected new SubConn") - case <-cc.RemoveSubConnCh: - t.Fatalf("got unexpected remove SubConn") - case <-time.After(defaultTestShortTimeout): - } - - // Remove p2, no updates. 
- clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build())) - - select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") - case <-cc.NewSubConnCh: - t.Fatalf("got unexpected new SubConn") - case <-cc.RemoveSubConnCh: - t.Fatalf("got unexpected remove SubConn") - case <-time.After(defaultTestShortTimeout): - } -} - -// Lower priority is used when higher priority is not ready. -// -// Init 0 and 1; 0 is up, use 0; 0 is down, 1 is up, use 1; add 2, use 1; 1 is -// down, use 2; remove 2, use 1. -func (s) TestEDSPriority_SwitchPriority(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // Two localities, with priorities [0, 1], each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - - // p0 is ready. - edsb.handleSubConnStateChange(sc0, connectivity.Connecting) - edsb.handleSubConnStateChange(sc0, connectivity.Ready) - - // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Turn down 0, 1 is used. 
- edsb.handleSubConnStateChange(sc0, connectivity.TransientFailure) - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - - // Test pick with 1. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - } - - // Add p2, it shouldn't cause any udpates. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) - - select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") - case <-cc.NewSubConnCh: - t.Fatalf("got unexpected new SubConn") - case <-cc.RemoveSubConnCh: - t.Fatalf("got unexpected remove SubConn") - case <-time.After(defaultTestShortTimeout): - } - - // Turn down 1, use 2 - edsb.handleSubConnStateChange(sc1, connectivity.TransientFailure) - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) - - // Test pick with 2. 
- p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } - } - - // Remove 2, use 1. - clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab3.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build())) - - // p2 SubConns are removed. - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scToRemove) - } - - // Should get an update with 1's old picker, to override 2's old picker. - p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p3.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { - t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) - } - } -} - -// Add a lower priority while the higher priority is down. -// -// Init 0 and 1; 0 and 1 both down; add 2, use 2. -func (s) TestEDSPriority_HigherDownWhileAddingLower(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // Two localities, with different priorities, each with one backend. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - - // Turn down 0, 1 is used. - edsb.handleSubConnStateChange(sc0, connectivity.TransientFailure) - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - // Turn down 1, pick should error. - edsb.handleSubConnStateChange(sc1, connectivity.TransientFailure) - - // Test pick failure. - pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != balancer.ErrTransientFailure { - t.Fatalf("want pick error %v, got %v", balancer.ErrTransientFailure, err) - } - } - - // Add p2, it should create a new SubConn. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - clab2.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) - - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) - - // Test pick with 2. 
- p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } - } -} - -// When a higher priority becomes available, all lower priorities are closed. -// -// Init 0,1,2; 0 and 1 down, use 2; 0 up, close 1 and 2. -func (s) TestEDSPriority_HigherReadyCloseAllLower(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // Two localities, with priorities [0,1,2], each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - clab1.AddLocality(testSubZones[2], 1, 2, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - - // Turn down 0, 1 is used. - edsb.handleSubConnStateChange(sc0, connectivity.TransientFailure) - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - // Turn down 1, 2 is used. 
- edsb.handleSubConnStateChange(sc1, connectivity.TransientFailure) - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) - - // Test pick with 2. - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } - } - - // When 0 becomes ready, 0 should be used, 1 and 2 should all be closed. - edsb.handleSubConnStateChange(sc0, connectivity.Ready) - - // sc1 and sc2 should be removed. - // - // With localities caching, the lower priorities are closed after a timeout, - // in goroutines. The order is no longer guaranteed. - scToRemove := []balancer.SubConn{<-cc.RemoveSubConnCh, <-cc.RemoveSubConnCh} - if !(cmp.Equal(scToRemove[0], sc1, cmp.AllowUnexported(testutils.TestSubConn{})) && - cmp.Equal(scToRemove[1], sc2, cmp.AllowUnexported(testutils.TestSubConn{}))) && - !(cmp.Equal(scToRemove[0], sc2, cmp.AllowUnexported(testutils.TestSubConn{})) && - cmp.Equal(scToRemove[1], sc1, cmp.AllowUnexported(testutils.TestSubConn{}))) { - t.Errorf("RemoveSubConn, want [%v, %v], got %v", sc1, sc2, scToRemove) - } - - // Test pick with 0. - p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p0.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) - } - } -} - -// At init, start the next lower priority after timeout if the higher priority -// doesn't get ready. -// -// Init 0,1; 0 is not ready (in connecting), after timeout, use 1. 
-func (s) TestEDSPriority_InitTimeout(t *testing.T) { - const testPriorityInitTimeout = time.Second - defer func() func() { - old := defaultPriorityInitTimeout - defaultPriorityInitTimeout = testPriorityInitTimeout - return func() { - defaultPriorityInitTimeout = old - } - }()() - - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // Two localities, with different priorities, each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - - // Keep 0 in connecting, 1 will be used after init timeout. - edsb.handleSubConnStateChange(sc0, connectivity.Connecting) - - // Make sure new SubConn is created before timeout. - select { - case <-time.After(testPriorityInitTimeout * 3 / 4): - case <-cc.NewSubConnAddrsCh: - t.Fatalf("Got a new SubConn too early (Within timeout). Expect a new SubConn only after timeout") - } - - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - - // Test pick with 1. 
- p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - } -} - -// Add localities to existing priorities. -// -// - start with 2 locality with p0 and p1 -// - add localities to existing p0 and p1 -func (s) TestEDSPriority_MultipleLocalities(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // Two localities, with different priorities, each with one backend. - clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab0.Build())) - - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc0, connectivity.Connecting) - edsb.handleSubConnStateChange(sc0, connectivity.Ready) - - // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Turn down p0 subconns, p1 subconns will be created. 
- edsb.handleSubConnStateChange(sc0, connectivity.TransientFailure) - - addrs1 := <-cc.NewSubConnAddrsCh - if got, want := addrs1[0].Addr, testEndpointAddrs[1]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - - // Test roundrobin with only p1 subconns. - p1 := <-cc.NewPickerCh - want = []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Reconnect p0 subconns, p1 subconn will be closed. - edsb.handleSubConnStateChange(sc0, connectivity.Ready) - - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - - // Test roundrobin with only p0 subconns. - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Add two localities, with two priorities, with one backend. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - clab1.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) - clab1.AddLocality(testSubZones[3], 1, 1, testEndpointAddrs[3:4], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - addrs2 := <-cc.NewSubConnAddrsCh - if got, want := addrs2[0].Addr, testEndpointAddrs[2]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) - - // Test roundrobin with only two p0 subconns. - p3 := <-cc.NewPickerCh - want = []balancer.SubConn{sc0, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Turn down p0 subconns, p1 subconns will be created. - edsb.handleSubConnStateChange(sc0, connectivity.TransientFailure) - edsb.handleSubConnStateChange(sc2, connectivity.TransientFailure) - - sc3 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc3, connectivity.Connecting) - edsb.handleSubConnStateChange(sc3, connectivity.Ready) - sc4 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc4, connectivity.Connecting) - edsb.handleSubConnStateChange(sc4, connectivity.Ready) - - // Test roundrobin with only p1 subconns. - p4 := <-cc.NewPickerCh - want = []balancer.SubConn{sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p4)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// EDS removes all localities, and re-adds them. 
-func (s) TestEDSPriority_RemovesAllLocalities(t *testing.T) { - const testPriorityInitTimeout = time.Second - defer func() func() { - old := defaultPriorityInitTimeout - defaultPriorityInitTimeout = testPriorityInitTimeout - return func() { - defaultPriorityInitTimeout = old - } - }()() - - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // Two localities, with different priorities, each with one backend. - clab0 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab0.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab0.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab0.Build())) - - addrs0 := <-cc.NewSubConnAddrsCh - if got, want := addrs0[0].Addr, testEndpointAddrs[0]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc0 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc0, connectivity.Connecting) - edsb.handleSubConnStateChange(sc0, connectivity.Ready) - - // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Remove all priorities. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - // p0 subconn should be removed. - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc0, scToRemove) - } - - // Test pick return TransientFailure. 
- pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != errAllPrioritiesRemoved { - t.Fatalf("want pick error %v, got %v", errAllPrioritiesRemoved, err) - } - } - - // Re-add two localities, with previous priorities, but different backends. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - clab2.AddLocality(testSubZones[1], 1, 1, testEndpointAddrs[3:4], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) - - addrs01 := <-cc.NewSubConnAddrsCh - if got, want := addrs01[0].Addr, testEndpointAddrs[2]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc01 := <-cc.NewSubConnCh - - // Don't send any update to p0, so to not override the old state of p0. - // Later, connect to p1 and then remove p1. This will fallback to p0, and - // will send p0's old picker if they are not correctly removed. - - // p1 will be used after priority init timeout. - addrs11 := <-cc.NewSubConnAddrsCh - if got, want := addrs11[0].Addr, testEndpointAddrs[3]; got != want { - t.Fatalf("sc is created with addr %v, want %v", got, want) - } - sc11 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc11, connectivity.Connecting) - edsb.handleSubConnStateChange(sc11, connectivity.Ready) - - // Test roundrobin with only p1 subconns. - p1 := <-cc.NewPickerCh - want = []balancer.SubConn{sc11} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Remove p1 from EDS, to fallback to p0. - clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab3.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build())) - - // p1 subconn should be removed. 
- scToRemove1 := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove1, sc11, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc11, scToRemove1) - } - - // Test pick return TransientFailure. - pFail1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if scst, err := pFail1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error _, %v, got %v, _ ,%v", balancer.ErrTransientFailure, scst, err) - } - } - - // Send an ready update for the p0 sc that was received when re-adding - // localities to EDS. - edsb.handleSubConnStateChange(sc01, connectivity.Connecting) - edsb.handleSubConnStateChange(sc01, connectivity.Ready) - - // Test roundrobin with only p0 subconns. - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc01} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") - case <-cc.NewSubConnCh: - t.Fatalf("got unexpected new SubConn") - case <-cc.RemoveSubConnCh: - t.Fatalf("got unexpected remove SubConn") - case <-time.After(defaultTestShortTimeout): - } -} - -func (s) TestPriorityType(t *testing.T) { - p0 := newPriorityType(0) - p1 := newPriorityType(1) - p2 := newPriorityType(2) - - if !p0.higherThan(p1) || !p0.higherThan(p2) { - t.Errorf("want p0 to be higher than p1 and p2, got p0>p1: %v, p0>p2: %v", !p0.higherThan(p1), !p0.higherThan(p2)) - } - if !p1.lowerThan(p0) || !p1.higherThan(p2) { - t.Errorf("want p1 to be between p0 and p2, got p1p2: %v", !p1.lowerThan(p0), !p1.higherThan(p2)) - } - if !p2.lowerThan(p0) || !p2.lowerThan(p1) { - t.Errorf("want p2 to be lower than p0 and p1, got p2") - } else if i > 50 && err != nil { - t.Errorf("The second 50%% picks should be non-drops, got error %v", err) - } - } - - // The same locality, remove drops. 
- clab6 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab6.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab6.Build())) - - // Pick without drops. - p6 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p6.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc3, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc3) - } - } -} - -// 2 locality -// - start with 2 locality -// - add locality -// - remove locality -// - address change for the locality -// - update locality weight -func (s) TestEDS_TwoLocalities(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // Two localities, each with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - - // Add the second locality later to make sure sc2 belongs to the second - // locality. Otherwise the test is flaky because of a map is used in EDS to - // keep localities. - clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) - - // Test roundrobin with two subconns. 
- p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Add another locality, with one backend. - clab2 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab2.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab2.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - clab2.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab2.Build())) - - sc3 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc3, connectivity.Connecting) - edsb.handleSubConnStateChange(sc3, connectivity.Ready) - - // Test roundrobin with three subconns. - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc1, sc2, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Remove first locality. - clab3 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab3.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - clab3.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:3], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab3.Build())) - - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - edsb.handleSubConnStateChange(scToRemove, connectivity.Shutdown) - - // Test pick with two subconns (without the first one). - p3 := <-cc.NewPickerCh - want = []balancer.SubConn{sc2, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Add a backend to the last locality. 
- clab4 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab4.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - clab4.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab4.Build())) - - sc4 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc4, connectivity.Connecting) - edsb.handleSubConnStateChange(sc4, connectivity.Ready) - - // Test pick with two subconns (without the first one). - p4 := <-cc.NewPickerCh - // Locality-1 will be picked twice, and locality-2 will be picked twice. - // Locality-1 contains only sc2, locality-2 contains sc3 and sc4. So expect - // two sc2's and sc3, sc4. - want = []balancer.SubConn{sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p4)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Change weight of the locality[1]. - clab5 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab5.AddLocality(testSubZones[1], 2, 0, testEndpointAddrs[1:2], nil) - clab5.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab5.Build())) - - // Test pick with two subconns different locality weight. - p5 := <-cc.NewPickerCh - // Locality-1 will be picked four times, and locality-2 will be picked twice - // (weight 2 and 1). Locality-1 contains only sc2, locality-2 contains sc3 and - // sc4. So expect four sc2's and sc3, sc4. - want = []balancer.SubConn{sc2, sc2, sc2, sc2, sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p5)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - // Change weight of the locality[1] to 0, it should never be picked. 
- clab6 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab6.AddLocality(testSubZones[1], 0, 0, testEndpointAddrs[1:2], nil) - clab6.AddLocality(testSubZones[2], 1, 0, testEndpointAddrs[2:4], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab6.Build())) - - // Changing weight of locality[1] to 0 caused it to be removed. It's subconn - // should also be removed. - // - // NOTE: this is because we handle locality with weight 0 same as the - // locality doesn't exist. If this changes in the future, this removeSubConn - // behavior will also change. - scToRemove2 := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove2, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc2, scToRemove2) - } - - // Test pick with two subconns different locality weight. - p6 := <-cc.NewPickerCh - // Locality-1 will be not be picked, and locality-2 will be picked. - // Locality-2 contains sc3 and sc4. So expect sc3, sc4. - want = []balancer.SubConn{sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p6)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -// The EDS balancer gets EDS resp with unhealthy endpoints. Test that only -// healthy ones are used. -func (s) TestEDS_EndpointsHealth(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // Two localities, each 3 backend, one Healthy, one Unhealthy, one Unknown. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:6], &testutils.AddLocalityOptions{ - Health: []corepb.HealthStatus{ - corepb.HealthStatus_HEALTHY, - corepb.HealthStatus_UNHEALTHY, - corepb.HealthStatus_UNKNOWN, - corepb.HealthStatus_DRAINING, - corepb.HealthStatus_TIMEOUT, - corepb.HealthStatus_DEGRADED, - }, - }) - clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[6:12], &testutils.AddLocalityOptions{ - Health: []corepb.HealthStatus{ - corepb.HealthStatus_HEALTHY, - corepb.HealthStatus_UNHEALTHY, - corepb.HealthStatus_UNKNOWN, - corepb.HealthStatus_DRAINING, - corepb.HealthStatus_TIMEOUT, - corepb.HealthStatus_DEGRADED, - }, - }) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - var ( - readySCs []balancer.SubConn - newSubConnAddrStrs []string - ) - for i := 0; i < 4; i++ { - addr := <-cc.NewSubConnAddrsCh - newSubConnAddrStrs = append(newSubConnAddrStrs, addr[0].Addr) - sc := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc, connectivity.Connecting) - edsb.handleSubConnStateChange(sc, connectivity.Ready) - readySCs = append(readySCs, sc) - } - - wantNewSubConnAddrStrs := []string{ - testEndpointAddrs[0], - testEndpointAddrs[2], - testEndpointAddrs[6], - testEndpointAddrs[8], - } - sortStrTrans := cmp.Transformer("Sort", func(in []string) []string { - out := append([]string(nil), in...) // Copy input to avoid mutating it. - sort.Strings(out) - return out - }) - if !cmp.Equal(newSubConnAddrStrs, wantNewSubConnAddrStrs, sortStrTrans) { - t.Fatalf("want newSubConn with address %v, got %v", wantNewSubConnAddrStrs, newSubConnAddrStrs) - } - - // There should be exactly 4 new SubConns. Check to make sure there's no - // more subconns being created. - select { - case <-cc.NewSubConnCh: - t.Fatalf("Got unexpected new subconn") - case <-time.After(time.Microsecond * 100): - } - - // Test roundrobin with the subconns. 
- p1 := <-cc.NewPickerCh - want := readySCs - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -func (s) TestClose(t *testing.T) { - edsb := newEDSBalancerImpl(nil, balancer.BuildOptions{}, nil, nil, nil) - // This is what could happen when switching between fallback and eds. This - // make sure it doesn't panic. - edsb.close() -} - -// TestEDS_EmptyUpdate covers the cases when eds impl receives an empty update. -// -// It should send an error picker with transient failure to the parent. -func (s) TestEDS_EmptyUpdate(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // The first update is an empty update. - edsb.handleEDSResponse(xdsclient.EndpointsUpdate{}) - // Pick should fail with transient failure, and all priority removed error. - perr0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := perr0.Pick(balancer.PickInfo{}) - if !reflect.DeepEqual(err, errAllPrioritiesRemoved) { - t.Fatalf("picker.Pick, got error %v, want error %v", err, errAllPrioritiesRemoved) - } - } - - // One locality with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - - // Pick with only the first backend. 
- p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !reflect.DeepEqual(gotSCSt.SubConn, sc1) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - } - - edsb.handleEDSResponse(xdsclient.EndpointsUpdate{}) - // Pick should fail with transient failure, and all priority removed error. - perr1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := perr1.Pick(balancer.PickInfo{}) - if !reflect.DeepEqual(err, errAllPrioritiesRemoved) { - t.Fatalf("picker.Pick, got error %v, want error %v", err, errAllPrioritiesRemoved) - } - } - - // Handle another update with priorities and localities. - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) - - // Pick with only the first backend. - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !reflect.DeepEqual(gotSCSt.SubConn, sc2) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } - } -} - -// Create XDS balancer, and update sub-balancer before handling eds responses. -// Then switch between round-robin and a test stub-balancer after handling first -// eds response. 
-func (s) TestEDS_UpdateSubBalancerName(t *testing.T) { - const balancerName = "stubBalancer-TestEDS_UpdateSubBalancerName" - - cc := testutils.NewTestClientConn(t) - stub.Register(balancerName, stub.BalancerFuncs{ - UpdateClientConnState: func(bd *stub.BalancerData, s balancer.ClientConnState) error { - if len(s.ResolverState.Addresses) == 0 { - return nil - } - bd.ClientConn.NewSubConn(s.ResolverState.Addresses, balancer.NewSubConnOptions{}) - return nil - }, - UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { - bd.ClientConn.UpdateState(balancer.State{ - ConnectivityState: state.ConnectivityState, - Picker: &testutils.TestConstPicker{Err: testutils.ErrTestConstPicker}, - }) - }, - }) - - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - t.Logf("update sub-balancer to stub-balancer") - edsb.handleChildPolicy(balancerName, nil) - - // Two localities, each with one backend. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - for i := 0; i < 2; i++ { - sc := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc, connectivity.Ready) - } - - p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != testutils.ErrTestConstPicker { - t.Fatalf("picker.Pick, got err %+v, want err %+v", err, testutils.ErrTestConstPicker) - } - } - - t.Logf("update sub-balancer to round-robin") - edsb.handleChildPolicy(roundrobin.Name, nil) - - for i := 0; i < 2; i++ { - <-cc.RemoveSubConnCh - } - - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) - - // Test roundrobin with two subconns. 
- p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1, sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } - - t.Logf("update sub-balancer to stub-balancer") - edsb.handleChildPolicy(balancerName, nil) - - for i := 0; i < 2; i++ { - scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) && - !cmp.Equal(scToRemove, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want (%v or %v), got %v", sc1, sc2, scToRemove) - } - edsb.handleSubConnStateChange(scToRemove, connectivity.Shutdown) - } - - for i := 0; i < 2; i++ { - sc := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc, connectivity.Ready) - } - - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := p2.Pick(balancer.PickInfo{}) - if err != testutils.ErrTestConstPicker { - t.Fatalf("picker.Pick, got err %q, want err %q", err, testutils.ErrTestConstPicker) - } - } - - t.Logf("update sub-balancer to round-robin") - edsb.handleChildPolicy(roundrobin.Name, nil) - - for i := 0; i < 2; i++ { - <-cc.RemoveSubConnCh - } - - sc3 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc3, connectivity.Connecting) - edsb.handleSubConnStateChange(sc3, connectivity.Ready) - sc4 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc4, connectivity.Connecting) - edsb.handleSubConnStateChange(sc4, connectivity.Ready) - - p3 := <-cc.NewPickerCh - want = []balancer.SubConn{sc3, sc4} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p3)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -func (s) TestEDS_CircuitBreaking(t *testing.T) { - origCircuitBreakingSupport := env.CircuitBreakingSupport - env.CircuitBreakingSupport = true - defer func() { env.CircuitBreakingSupport = origCircuitBreakingSupport }() - - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - 
edsb.enqueueChildBalancerStateUpdate = edsb.updateState - var maxRequests uint32 = 50 - edsb.updateServiceRequestsConfig("test", &maxRequests) - - // One locality with one backend. - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - - // Picks with drops. - dones := []func(){} - p := <-cc.NewPickerCh - for i := 0; i < 100; i++ { - pr, err := p.Pick(balancer.PickInfo{}) - if i < 50 && err != nil { - t.Errorf("The first 50%% picks should be non-drops, got error %v", err) - } else if i > 50 && err == nil { - t.Errorf("The second 50%% picks should be drops, got error ") - } - dones = append(dones, func() { - if pr.Done != nil { - pr.Done(balancer.DoneInfo{}) - } - }) - } - - for _, done := range dones { - done() - } - dones = []func(){} - - // Pick without drops. - for i := 0; i < 50; i++ { - pr, err := p.Pick(balancer.PickInfo{}) - if err != nil { - t.Errorf("The third 50%% picks should be non-drops, got error %v", err) - } - dones = append(dones, func() { - if pr.Done != nil { - pr.Done(balancer.DoneInfo{}) - } - }) - } - - // Without this, future tests with the same service name will fail. - for _, done := range dones { - done() - } - - // Send another update, with only circuit breaking update (and no picker - // update afterwards). Make sure the new picker uses the new configs. - var maxRequests2 uint32 = 10 - edsb.updateServiceRequestsConfig("test", &maxRequests2) - - // Picks with drops. 
- dones = []func(){} - p2 := <-cc.NewPickerCh - for i := 0; i < 100; i++ { - pr, err := p2.Pick(balancer.PickInfo{}) - if i < 10 && err != nil { - t.Errorf("The first 10%% picks should be non-drops, got error %v", err) - } else if i > 10 && err == nil { - t.Errorf("The next 90%% picks should be drops, got error ") - } - dones = append(dones, func() { - if pr.Done != nil { - pr.Done(balancer.DoneInfo{}) - } - }) - } - - for _, done := range dones { - done() - } - dones = []func(){} - - // Pick without drops. - for i := 0; i < 10; i++ { - pr, err := p2.Pick(balancer.PickInfo{}) - if err != nil { - t.Errorf("The next 10%% picks should be non-drops, got error %v", err) - } - dones = append(dones, func() { - if pr.Done != nil { - pr.Done(balancer.DoneInfo{}) - } - }) - } - - // Without this, future tests with the same service name will fail. - for _, done := range dones { - done() - } -} - -func init() { - balancer.Register(&testInlineUpdateBalancerBuilder{}) -} - -// A test balancer that updates balancer.State inline when handling ClientConn -// state. 
-type testInlineUpdateBalancerBuilder struct{} - -func (*testInlineUpdateBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - return &testInlineUpdateBalancer{cc: cc} -} - -func (*testInlineUpdateBalancerBuilder) Name() string { - return "test-inline-update-balancer" -} - -type testInlineUpdateBalancer struct { - cc balancer.ClientConn -} - -func (tb *testInlineUpdateBalancer) ResolverError(error) { - panic("not implemented") -} - -func (tb *testInlineUpdateBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { -} - -var errTestInlineStateUpdate = fmt.Errorf("don't like addresses, empty or not") - -func (tb *testInlineUpdateBalancer) UpdateClientConnState(balancer.ClientConnState) error { - tb.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &testutils.TestConstPicker{Err: errTestInlineStateUpdate}, - }) - return nil -} - -func (*testInlineUpdateBalancer) Close() { -} - -// When the child policy update picker inline in a handleClientUpdate call -// (e.g., roundrobin handling empty addresses). There could be deadlock caused -// by acquiring a locked mutex. -func (s) TestEDS_ChildPolicyUpdatePickerInline(t *testing.T) { - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, nil, nil) - edsb.enqueueChildBalancerStateUpdate = func(p priorityType, state balancer.State) { - // For this test, euqueue needs to happen asynchronously (like in the - // real implementation). 
- go edsb.updateState(p, state) - } - - edsb.handleChildPolicy("test-inline-update-balancer", nil) - - clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - - p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != errTestInlineStateUpdate { - t.Fatalf("picker.Pick, got err %q, want err %q", err, errTestInlineStateUpdate) - } - } -} - -func (s) TestDropPicker(t *testing.T) { - const pickCount = 12 - var constPicker = &testutils.TestConstPicker{ - SC: testutils.TestSubConns[0], - } - - tests := []struct { - name string - drops []*dropper - }{ - { - name: "no drop", - drops: nil, - }, - { - name: "one drop", - drops: []*dropper{ - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 2}), - }, - }, - { - name: "two drops", - drops: []*dropper{ - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 3}), - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 2}), - }, - }, - { - name: "three drops", - drops: []*dropper{ - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 3}), - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 4}), - newDropper(xdsclient.OverloadDropConfig{Numerator: 1, Denominator: 2}), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - p := newDropPicker(constPicker, tt.drops, nil, nil, defaultServiceRequestCountMax) - - // scCount is the number of sc's returned by pick. The opposite of - // drop-count. 
- var ( - scCount int - wantCount = pickCount - ) - for _, dp := range tt.drops { - wantCount = wantCount * int(dp.c.Denominator-dp.c.Numerator) / int(dp.c.Denominator) - } - - for i := 0; i < pickCount; i++ { - _, err := p.Pick(balancer.PickInfo{}) - if err == nil { - scCount++ - } - } - - if scCount != (wantCount) { - t.Errorf("drops: %+v, scCount %v, wantCount %v", tt.drops, scCount, wantCount) - } - }) - } -} - -func (s) TestEDS_LoadReport(t *testing.T) { - origCircuitBreakingSupport := env.CircuitBreakingSupport - env.CircuitBreakingSupport = true - defer func() { env.CircuitBreakingSupport = origCircuitBreakingSupport }() - - // We create an xdsClientWrapper with a dummy xdsClientInterface which only - // implements the LoadStore() method to return the underlying load.Store to - // be used. - loadStore := load.NewStore() - lsWrapper := &loadStoreWrapper{} - lsWrapper.updateServiceName(testClusterNames[0]) - lsWrapper.updateLoadStore(loadStore) - - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, lsWrapper, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - const ( - testServiceName = "test-service" - cbMaxRequests = 20 - ) - var maxRequestsTemp uint32 = cbMaxRequests - edsb.updateServiceRequestsConfig(testServiceName, &maxRequestsTemp) - defer client.ClearCounterForTesting(testServiceName) - - backendToBalancerID := make(map[balancer.SubConn]internal.LocalityID) - - const testDropCategory = "test-drop" - // Two localities, each with one backend. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], map[string]uint32{testDropCategory: 50}) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - locality1 := internal.LocalityID{SubZone: testSubZones[0]} - backendToBalancerID[sc1] = locality1 - - // Add the second locality later to make sure sc2 belongs to the second - // locality. Otherwise the test is flaky because of a map is used in EDS to - // keep localities. - clab1.AddLocality(testSubZones[1], 1, 0, testEndpointAddrs[1:2], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - sc2 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc2, connectivity.Connecting) - edsb.handleSubConnStateChange(sc2, connectivity.Ready) - locality2 := internal.LocalityID{SubZone: testSubZones[1]} - backendToBalancerID[sc2] = locality2 - - // Test roundrobin with two subconns. - p1 := <-cc.NewPickerCh - // We expect the 10 picks to be split between the localities since they are - // of equal weight. And since we only mark the picks routed to sc2 as done, - // the picks on sc1 should show up as inProgress. - locality1JSON, _ := locality1.ToString() - locality2JSON, _ := locality2.ToString() - const ( - rpcCount = 100 - // 50% will be dropped with category testDropCategory. - dropWithCategory = rpcCount / 2 - // In the remaining RPCs, only cbMaxRequests are allowed by circuit - // breaking. Others will be dropped by CB. - dropWithCB = rpcCount - dropWithCategory - cbMaxRequests - - rpcInProgress = cbMaxRequests / 2 // 50% of RPCs will be never done. - rpcSucceeded = cbMaxRequests / 2 // 50% of RPCs will succeed. 
- ) - wantStoreData := []*load.Data{{ - Cluster: testClusterNames[0], - Service: "", - LocalityStats: map[string]load.LocalityData{ - locality1JSON: {RequestStats: load.RequestData{InProgress: rpcInProgress}}, - locality2JSON: {RequestStats: load.RequestData{Succeeded: rpcSucceeded}}, - }, - TotalDrops: dropWithCategory + dropWithCB, - Drops: map[string]uint64{ - testDropCategory: dropWithCategory, - }, - }} - - var rpcsToBeDone []balancer.PickResult - // Run the picks, but only pick with sc1 will be done later. - for i := 0; i < rpcCount; i++ { - scst, _ := p1.Pick(balancer.PickInfo{}) - if scst.Done != nil && scst.SubConn != sc1 { - rpcsToBeDone = append(rpcsToBeDone, scst) - } - } - // Call done on those sc1 picks. - for _, scst := range rpcsToBeDone { - scst.Done(balancer.DoneInfo{}) - } - - gotStoreData := loadStore.Stats(testClusterNames[0:1]) - if diff := cmp.Diff(wantStoreData, gotStoreData, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(load.Data{}, "ReportInterval")); diff != "" { - t.Errorf("store.stats() returned unexpected diff (-want +got):\n%s", diff) - } -} - -// TestEDS_LoadReportDisabled covers the case that LRS is disabled. It makes -// sure the EDS implementation isn't broken (doesn't panic). -func (s) TestEDS_LoadReportDisabled(t *testing.T) { - lsWrapper := &loadStoreWrapper{} - lsWrapper.updateServiceName(testClusterNames[0]) - // Not calling lsWrapper.updateLoadStore(loadStore) because LRS is disabled. - - cc := testutils.NewTestClientConn(t) - edsb := newEDSBalancerImpl(cc, balancer.BuildOptions{}, nil, lsWrapper, nil) - edsb.enqueueChildBalancerStateUpdate = edsb.updateState - - // One localities, with one backend. 
- clab1 := testutils.NewClusterLoadAssignmentBuilder(testClusterNames[0], nil) - clab1.AddLocality(testSubZones[0], 1, 0, testEndpointAddrs[:1], nil) - edsb.handleEDSResponse(parseEDSRespProtoForTesting(clab1.Build())) - sc1 := <-cc.NewSubConnCh - edsb.handleSubConnStateChange(sc1, connectivity.Connecting) - edsb.handleSubConnStateChange(sc1, connectivity.Ready) - - // Test roundrobin with two subconns. - p1 := <-cc.NewPickerCh - // We call picks to make sure they don't panic. - for i := 0; i < 10; i++ { - p1.Pick(balancer.PickInfo{}) - } -} diff --git a/xds/internal/balancer/edsbalancer/eds_test.go b/xds/internal/balancer/edsbalancer/eds_test.go deleted file mode 100644 index 5fe1f2ef6b90..000000000000 --- a/xds/internal/balancer/edsbalancer/eds_test.go +++ /dev/null @@ -1,825 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package edsbalancer - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "reflect" - "testing" - "time" - - "github.com/golang/protobuf/jsonpb" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpctest" - scpb "google.golang.org/grpc/internal/proto/grpc_service_config" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" - "google.golang.org/grpc/xds/internal/testutils/fakeclient" - - _ "google.golang.org/grpc/xds/internal/client/v2" // V2 client registration. -) - -const ( - defaultTestTimeout = 1 * time.Second - defaultTestShortTimeout = 10 * time.Millisecond - testServiceName = "test/foo" - testEDSClusterName = "test/service/eds" -) - -var ( - // A non-empty endpoints update which is expected to be accepted by the EDS - // LB policy. 
- defaultEndpointsUpdate = xdsclient.EndpointsUpdate{ - Localities: []xdsclient.Locality{ - { - Endpoints: []xdsclient.Endpoint{{Address: "endpoint1"}}, - ID: internal.LocalityID{Zone: "zone"}, - Priority: 1, - Weight: 100, - }, - }, - } -) - -func init() { - balancer.Register(&edsBalancerBuilder{}) -} - -func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } -} - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -const testBalancerNameFooBar = "foo.bar" - -func newNoopTestClientConn() *noopTestClientConn { - return &noopTestClientConn{} -} - -// noopTestClientConn is used in EDS balancer config update tests that only -// cover the config update handling, but not SubConn/load-balancing. -type noopTestClientConn struct { - balancer.ClientConn -} - -func (t *noopTestClientConn) NewSubConn([]resolver.Address, balancer.NewSubConnOptions) (balancer.SubConn, error) { - return nil, nil -} - -func (noopTestClientConn) Target() string { return testServiceName } - -type scStateChange struct { - sc balancer.SubConn - state connectivity.State -} - -type fakeEDSBalancer struct { - cc balancer.ClientConn - childPolicy *testutils.Channel - subconnStateChange *testutils.Channel - edsUpdate *testutils.Channel - serviceName *testutils.Channel - serviceRequestMax *testutils.Channel -} - -func (f *fakeEDSBalancer) handleSubConnStateChange(sc balancer.SubConn, state connectivity.State) { - f.subconnStateChange.Send(&scStateChange{sc: sc, state: state}) -} - -func (f *fakeEDSBalancer) handleChildPolicy(name string, config json.RawMessage) { - f.childPolicy.Send(&loadBalancingConfig{Name: name, Config: config}) -} - -func (f *fakeEDSBalancer) handleEDSResponse(edsResp xdsclient.EndpointsUpdate) { - f.edsUpdate.Send(edsResp) -} - -func (f *fakeEDSBalancer) updateState(priority priorityType, s balancer.State) {} - -func (f 
*fakeEDSBalancer) updateServiceRequestsConfig(serviceName string, max *uint32) { - f.serviceName.Send(serviceName) - f.serviceRequestMax.Send(max) -} - -func (f *fakeEDSBalancer) close() {} - -func (f *fakeEDSBalancer) waitForChildPolicy(ctx context.Context, wantPolicy *loadBalancingConfig) error { - val, err := f.childPolicy.Receive(ctx) - if err != nil { - return err - } - gotPolicy := val.(*loadBalancingConfig) - if !cmp.Equal(gotPolicy, wantPolicy) { - return fmt.Errorf("got childPolicy %v, want %v", gotPolicy, wantPolicy) - } - return nil -} - -func (f *fakeEDSBalancer) waitForSubConnStateChange(ctx context.Context, wantState *scStateChange) error { - val, err := f.subconnStateChange.Receive(ctx) - if err != nil { - return err - } - gotState := val.(*scStateChange) - if !cmp.Equal(gotState, wantState, cmp.AllowUnexported(scStateChange{})) { - return fmt.Errorf("got subconnStateChange %v, want %v", gotState, wantState) - } - return nil -} - -func (f *fakeEDSBalancer) waitForEDSResponse(ctx context.Context, wantUpdate xdsclient.EndpointsUpdate) error { - val, err := f.edsUpdate.Receive(ctx) - if err != nil { - return err - } - gotUpdate := val.(xdsclient.EndpointsUpdate) - if !reflect.DeepEqual(gotUpdate, wantUpdate) { - return fmt.Errorf("got edsUpdate %+v, want %+v", gotUpdate, wantUpdate) - } - return nil -} - -func (f *fakeEDSBalancer) waitForCounterUpdate(ctx context.Context, wantServiceName string) error { - val, err := f.serviceName.Receive(ctx) - if err != nil { - return err - } - gotServiceName := val.(string) - if gotServiceName != wantServiceName { - return fmt.Errorf("got serviceName %v, want %v", gotServiceName, wantServiceName) - } - return nil -} - -func (f *fakeEDSBalancer) waitForCountMaxUpdate(ctx context.Context, want *uint32) error { - val, err := f.serviceRequestMax.Receive(ctx) - if err != nil { - return err - } - got := val.(*uint32) - - if got == nil && want == nil { - return nil - } - if got != nil && want != nil { - if *got != *want { - 
return fmt.Errorf("got countMax %v, want %v", *got, *want) - } - return nil - } - return fmt.Errorf("got countMax %+v, want %+v", got, want) -} - -func newFakeEDSBalancer(cc balancer.ClientConn) edsBalancerImplInterface { - return &fakeEDSBalancer{ - cc: cc, - childPolicy: testutils.NewChannelWithSize(10), - subconnStateChange: testutils.NewChannelWithSize(10), - edsUpdate: testutils.NewChannelWithSize(10), - serviceName: testutils.NewChannelWithSize(10), - serviceRequestMax: testutils.NewChannelWithSize(10), - } -} - -type fakeSubConn struct{} - -func (*fakeSubConn) UpdateAddresses([]resolver.Address) { panic("implement me") } -func (*fakeSubConn) Connect() { panic("implement me") } - -// waitForNewEDSLB makes sure that a new edsLB is created by the top-level -// edsBalancer. -func waitForNewEDSLB(ctx context.Context, ch *testutils.Channel) (*fakeEDSBalancer, error) { - val, err := ch.Receive(ctx) - if err != nil { - return nil, fmt.Errorf("error when waiting for a new edsLB: %v", err) - } - return val.(*fakeEDSBalancer), nil -} - -// setup overrides the functions which are used to create the xdsClient and the -// edsLB, creates fake version of them and makes them available on the provided -// channels. The returned cancel function should be called by the test for -// cleanup. 
-func setup(edsLBCh *testutils.Channel) (*fakeclient.Client, func()) { - xdsC := fakeclient.NewClientWithName(testBalancerNameFooBar) - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } - - origNewEDSBalancer := newEDSBalancer - newEDSBalancer = func(cc balancer.ClientConn, _ balancer.BuildOptions, _ func(priorityType, balancer.State), _ load.PerClusterReporter, _ *grpclog.PrefixLogger) edsBalancerImplInterface { - edsLB := newFakeEDSBalancer(cc) - defer func() { edsLBCh.Send(edsLB) }() - return edsLB - } - return xdsC, func() { - newEDSBalancer = origNewEDSBalancer - newXDSClient = oldNewXDSClient - } -} - -const ( - fakeBalancerA = "fake_balancer_A" - fakeBalancerB = "fake_balancer_B" -) - -// Install two fake balancers for service config update tests. -// -// ParseConfig only accepts the json if the balancer specified is registered. -func init() { - balancer.Register(&fakeBalancerBuilder{name: fakeBalancerA}) - balancer.Register(&fakeBalancerBuilder{name: fakeBalancerB}) -} - -type fakeBalancerBuilder struct { - name string -} - -func (b *fakeBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - return &fakeBalancer{cc: cc} -} - -func (b *fakeBalancerBuilder) Name() string { - return b.name -} - -type fakeBalancer struct { - cc balancer.ClientConn -} - -func (b *fakeBalancer) ResolverError(error) { - panic("implement me") -} - -func (b *fakeBalancer) UpdateClientConnState(balancer.ClientConnState) error { - panic("implement me") -} - -func (b *fakeBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { - panic("implement me") -} - -func (b *fakeBalancer) Close() {} - -// TestConfigChildPolicyUpdate verifies scenarios where the childPolicy -// section of the lbConfig is updated. -// -// The test does the following: -// * Builds a new EDS balancer. -// * Pushes a new ClientConnState with a childPolicy set to fakeBalancerA. 
-// Verifies that an EDS watch is registered. It then pushes a new edsUpdate -// through the fakexds client. Verifies that a new edsLB is created and it -// receives the expected childPolicy. -// * Pushes a new ClientConnState with a childPolicy set to fakeBalancerB. -// Verifies that the existing edsLB receives the new child policy. -func (s) TestConfigChildPolicyUpdate(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - edsLB, err := waitForNewEDSLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - - lbCfgA := &loadBalancingConfig{ - Name: fakeBalancerA, - Config: json.RawMessage("{}"), - } - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ - ChildPolicy: lbCfgA, - EDSServiceName: testServiceName, - }, - }); err != nil { - t.Fatalf("edsB.UpdateClientConnState() failed: %v", err) - } - - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - xdsC.InvokeWatchEDSCallback(defaultEndpointsUpdate, nil) - if err := edsLB.waitForChildPolicy(ctx, lbCfgA); err != nil { - t.Fatal(err) - } - if err := edsLB.waitForCounterUpdate(ctx, testServiceName); err != nil { - t.Fatal(err) - } - if err := edsLB.waitForCountMaxUpdate(ctx, nil); err != nil { - t.Fatal(err) - } - - var testCountMax uint32 = 100 - lbCfgB := &loadBalancingConfig{ - Name: fakeBalancerB, - Config: json.RawMessage("{}"), - } - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ - ChildPolicy: lbCfgB, - EDSServiceName: testServiceName, - 
MaxConcurrentRequests: &testCountMax, - }, - }); err != nil { - t.Fatalf("edsB.UpdateClientConnState() failed: %v", err) - } - if err := edsLB.waitForChildPolicy(ctx, lbCfgB); err != nil { - t.Fatal(err) - } - if err := edsLB.waitForCounterUpdate(ctx, testServiceName); err != nil { - // Counter is updated even though the service name didn't change. The - // eds_impl will compare the service names, and skip if it didn't change. - t.Fatal(err) - } - if err := edsLB.waitForCountMaxUpdate(ctx, &testCountMax); err != nil { - t.Fatal(err) - } -} - -// TestSubConnStateChange verifies if the top-level edsBalancer passes on -// the subConnStateChange to appropriate child balancer. -func (s) TestSubConnStateChange(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - edsLB, err := waitForNewEDSLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, - }); err != nil { - t.Fatalf("edsB.UpdateClientConnState() failed: %v", err) - } - - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - xdsC.InvokeWatchEDSCallback(defaultEndpointsUpdate, nil) - - fsc := &fakeSubConn{} - state := connectivity.Ready - edsB.UpdateSubConnState(fsc, balancer.SubConnState{ConnectivityState: state}) - if err := edsLB.waitForSubConnStateChange(ctx, &scStateChange{sc: fsc, state: state}); err != nil { - t.Fatal(err) - } -} - -// TestErrorFromXDSClientUpdate verifies that an error from xdsClient 
update is -// handled correctly. -// -// If it's resource-not-found, watch will NOT be canceled, the EDS impl will -// receive an empty EDS update, and new RPCs will fail. -// -// If it's connection error, nothing will happen. This will need to change to -// handle fallback. -func (s) TestErrorFromXDSClientUpdate(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - edsLB, err := waitForNewEDSLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, - }); err != nil { - t.Fatal(err) - } - - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil) - if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { - t.Fatalf("EDS impl got unexpected EDS response: %v", err) - } - - connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error") - xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, connectionErr) - - sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { - t.Fatal("watch was canceled, want not canceled (timeout error)") - } - - sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if err := edsLB.waitForEDSResponse(sCtx, xdsclient.EndpointsUpdate{}); err != 
context.DeadlineExceeded { - t.Fatal(err) - } - - resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "edsBalancer resource not found error") - xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, resourceErr) - // Even if error is resource not found, watch shouldn't be canceled, because - // this is an EDS resource removed (and xds client actually never sends this - // error, but we still handles it). - sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { - t.Fatal("watch was canceled, want not canceled (timeout error)") - } - if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { - t.Fatalf("eds impl expecting empty update, got %v", err) - } -} - -// TestErrorFromResolver verifies that resolver errors are handled correctly. -// -// If it's resource-not-found, watch will be canceled, the EDS impl will receive -// an empty EDS update, and new RPCs will fail. -// -// If it's connection error, nothing will happen. This will need to change to -// handle fallback. 
-func (s) TestErrorFromResolver(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - edsLB, err := waitForNewEDSLB(ctx, edsLBCh) - if err != nil { - t.Fatal(err) - } - - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{EDSServiceName: testServiceName}, - }); err != nil { - t.Fatal(err) - } - - if _, err := xdsC.WaitForWatchEDS(ctx); err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - xdsC.InvokeWatchEDSCallback(xdsclient.EndpointsUpdate{}, nil) - if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { - t.Fatalf("EDS impl got unexpected EDS response: %v", err) - } - - connectionErr := xdsclient.NewErrorf(xdsclient.ErrorTypeConnection, "connection error") - edsB.ResolverError(connectionErr) - - sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if err := xdsC.WaitForCancelEDSWatch(sCtx); err != context.DeadlineExceeded { - t.Fatal("watch was canceled, want not canceled (timeout error)") - } - - sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if err := edsLB.waitForEDSResponse(sCtx, xdsclient.EndpointsUpdate{}); err != context.DeadlineExceeded { - t.Fatal("eds impl got EDS resp, want timeout error") - } - - resourceErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "edsBalancer resource not found error") - edsB.ResolverError(resourceErr) - if err := xdsC.WaitForCancelEDSWatch(ctx); err != nil { - t.Fatalf("want watch to be canceled, 
waitForCancel failed: %v", err) - } - if err := edsLB.waitForEDSResponse(ctx, xdsclient.EndpointsUpdate{}); err != nil { - t.Fatalf("EDS impl got unexpected EDS response: %v", err) - } -} - -// Given a list of resource names, verifies that EDS requests for the same are -// sent by the EDS balancer, through the fake xDS client. -func verifyExpectedRequests(ctx context.Context, fc *fakeclient.Client, resourceNames ...string) error { - for _, name := range resourceNames { - if name == "" { - // ResourceName empty string indicates a cancel. - if err := fc.WaitForCancelEDSWatch(ctx); err != nil { - return fmt.Errorf("timed out when expecting resource %q", name) - } - return nil - } - - resName, err := fc.WaitForWatchEDS(ctx) - if err != nil { - return fmt.Errorf("timed out when expecting resource %q, %p", name, fc) - } - if resName != name { - return fmt.Errorf("got EDS request for resource %q, expected: %q", resName, name) - } - } - return nil -} - -// TestClientWatchEDS verifies that the xdsClient inside the top-level EDS LB -// policy registers an EDS watch for expected resource upon receiving an update -// from gRPC. -func (s) TestClientWatchEDS(t *testing.T) { - edsLBCh := testutils.NewChannel() - xdsC, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() - - // Update with an non-empty edsServiceName should trigger an EDS watch for - // the same. 
- if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{EDSServiceName: "foobar-1"}, - }); err != nil { - t.Fatal(err) - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if err := verifyExpectedRequests(ctx, xdsC, "foobar-1"); err != nil { - t.Fatal(err) - } - - // Also test the case where the edsServerName changes from one non-empty - // name to another, and make sure a new watch is registered. The previously - // registered watch will be cancelled, which will result in an EDS request - // with no resource names being sent to the server. - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{EDSServiceName: "foobar-2"}, - }); err != nil { - t.Fatal(err) - } - if err := verifyExpectedRequests(ctx, xdsC, "", "foobar-2"); err != nil { - t.Fatal(err) - } -} - -// TestCounterUpdate verifies that the counter update is triggered with the -// service name from an update's config. -func (s) TestCounterUpdate(t *testing.T) { - edsLBCh := testutils.NewChannel() - _, cleanup := setup(edsLBCh) - defer cleanup() - - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{Target: resolver.Target{Endpoint: testServiceName}}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() - - var testCountMax uint32 = 100 - // Update should trigger counter update with provided service name. 
- if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ - EDSServiceName: "foobar-1", - MaxConcurrentRequests: &testCountMax, - }, - }); err != nil { - t.Fatal(err) - } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - edsI := edsB.(*edsBalancer).edsImpl.(*fakeEDSBalancer) - if err := edsI.waitForCounterUpdate(ctx, "foobar-1"); err != nil { - t.Fatal(err) - } - if err := edsI.waitForCountMaxUpdate(ctx, &testCountMax); err != nil { - t.Fatal(err) - } -} - -func (s) TestBalancerConfigParsing(t *testing.T) { - const testEDSName = "eds.service" - var testLRSName = "lrs.server" - b := bytes.NewBuffer(nil) - if err := (&jsonpb.Marshaler{}).Marshal(b, &scpb.XdsConfig{ - ChildPolicy: []*scpb.LoadBalancingConfig{ - {Policy: &scpb.LoadBalancingConfig_Xds{}}, - {Policy: &scpb.LoadBalancingConfig_RoundRobin{ - RoundRobin: &scpb.RoundRobinConfig{}, - }}, - }, - FallbackPolicy: []*scpb.LoadBalancingConfig{ - {Policy: &scpb.LoadBalancingConfig_Xds{}}, - {Policy: &scpb.LoadBalancingConfig_PickFirst{ - PickFirst: &scpb.PickFirstConfig{}, - }}, - }, - EdsServiceName: testEDSName, - LrsLoadReportingServerName: &wrapperspb.StringValue{Value: testLRSName}, - }); err != nil { - t.Fatalf("%v", err) - } - - var testMaxConcurrentRequests uint32 = 123 - tests := []struct { - name string - js json.RawMessage - want serviceconfig.LoadBalancingConfig - wantErr bool - }{ - { - name: "bad json", - js: json.RawMessage(`i am not JSON`), - wantErr: true, - }, - { - name: "empty", - js: json.RawMessage(`{}`), - want: &EDSConfig{}, - }, - { - name: "jsonpb-generated", - js: b.Bytes(), - want: &EDSConfig{ - ChildPolicy: &loadBalancingConfig{ - Name: "round_robin", - Config: json.RawMessage("{}"), - }, - FallBackPolicy: &loadBalancingConfig{ - Name: "pick_first", - Config: json.RawMessage("{}"), - }, - EDSServiceName: testEDSName, - LrsLoadReportingServerName: &testLRSName, - }, - }, - { - // json with random 
balancers, and the first is not registered. - name: "manually-generated", - js: json.RawMessage(` -{ - "childPolicy": [ - {"fake_balancer_C": {}}, - {"fake_balancer_A": {}}, - {"fake_balancer_B": {}} - ], - "fallbackPolicy": [ - {"fake_balancer_C": {}}, - {"fake_balancer_B": {}}, - {"fake_balancer_A": {}} - ], - "edsServiceName": "eds.service", - "maxConcurrentRequests": 123, - "lrsLoadReportingServerName": "lrs.server" -}`), - want: &EDSConfig{ - ChildPolicy: &loadBalancingConfig{ - Name: "fake_balancer_A", - Config: json.RawMessage("{}"), - }, - FallBackPolicy: &loadBalancingConfig{ - Name: "fake_balancer_B", - Config: json.RawMessage("{}"), - }, - EDSServiceName: testEDSName, - MaxConcurrentRequests: &testMaxConcurrentRequests, - LrsLoadReportingServerName: &testLRSName, - }, - }, - { - // json with no lrs server name, LrsLoadReportingServerName should - // be nil (not an empty string). - name: "no-lrs-server-name", - js: json.RawMessage(` -{ - "edsServiceName": "eds.service" -}`), - want: &EDSConfig{ - EDSServiceName: testEDSName, - LrsLoadReportingServerName: nil, - }, - }, - { - name: "good child policy", - js: json.RawMessage(`{"childPolicy":[{"pick_first":{}}]}`), - want: &EDSConfig{ - ChildPolicy: &loadBalancingConfig{ - Name: "pick_first", - Config: json.RawMessage(`{}`), - }, - }, - }, - { - name: "multiple good child policies", - js: json.RawMessage(`{"childPolicy":[{"round_robin":{}},{"pick_first":{}}]}`), - want: &EDSConfig{ - ChildPolicy: &loadBalancingConfig{ - Name: "round_robin", - Config: json.RawMessage(`{}`), - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b := &edsBalancerBuilder{} - got, err := b.ParseConfig(tt.js) - if (err != nil) != tt.wantErr { - t.Fatalf("edsBalancerBuilder.ParseConfig() error = %v, wantErr %v", err, tt.wantErr) - } - if tt.wantErr { - return - } - if !cmp.Equal(got, tt.want) { - t.Errorf(cmp.Diff(got, tt.want)) - } - }) - } -} - -func (s) TestEqualStringPointers(t *testing.T) { - 
var ( - ta1 = "test-a" - ta2 = "test-a" - tb = "test-b" - ) - tests := []struct { - name string - a *string - b *string - want bool - }{ - {"both-nil", nil, nil, true}, - {"a-non-nil", &ta1, nil, false}, - {"b-non-nil", nil, &tb, false}, - {"equal", &ta1, &ta2, true}, - {"different", &ta1, &tb, false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := equalStringPointers(tt.a, tt.b); got != tt.want { - t.Errorf("equalStringPointers() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/xds/internal/balancer/edsbalancer/eds_testutil.go b/xds/internal/balancer/edsbalancer/eds_testutil.go deleted file mode 100644 index 5e37cdcb47c7..000000000000 --- a/xds/internal/balancer/edsbalancer/eds_testutil.go +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "fmt" - "net" - "strconv" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" - typepb "github.com/envoyproxy/go-control-plane/envoy/type" - "google.golang.org/grpc/xds/internal" - xdsclient "google.golang.org/grpc/xds/internal/client" -) - -// parseEDSRespProtoForTesting parses EDS response, and panic if parsing fails. -// -// TODO: delete this. 
The EDS balancer tests should build an EndpointsUpdate -// directly, instead of building and parsing a proto message. -func parseEDSRespProtoForTesting(m *xdspb.ClusterLoadAssignment) xdsclient.EndpointsUpdate { - u, err := parseEDSRespProto(m) - if err != nil { - panic(err.Error()) - } - return u -} - -// parseEDSRespProto turns EDS response proto message to EndpointsUpdate. -func parseEDSRespProto(m *xdspb.ClusterLoadAssignment) (xdsclient.EndpointsUpdate, error) { - ret := xdsclient.EndpointsUpdate{} - for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { - ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) - } - priorities := make(map[uint32]struct{}) - for _, locality := range m.Endpoints { - l := locality.GetLocality() - if l == nil { - return xdsclient.EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) - } - lid := internal.LocalityID{ - Region: l.Region, - Zone: l.Zone, - SubZone: l.SubZone, - } - priority := locality.GetPriority() - priorities[priority] = struct{}{} - ret.Localities = append(ret.Localities, xdsclient.Locality{ - ID: lid, - Endpoints: parseEndpoints(locality.GetLbEndpoints()), - Weight: locality.GetLoadBalancingWeight().GetValue(), - Priority: priority, - }) - } - for i := 0; i < len(priorities); i++ { - if _, ok := priorities[uint32(i)]; !ok { - return xdsclient.EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) - } - } - return ret, nil -} - -func parseAddress(socketAddress *corepb.SocketAddress) string { - return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) -} - -func parseDropPolicy(dropPolicy *xdspb.ClusterLoadAssignment_Policy_DropOverload) xdsclient.OverloadDropConfig { - percentage := dropPolicy.GetDropPercentage() - var ( - numerator = percentage.GetNumerator() - denominator uint32 - ) - switch percentage.GetDenominator() { - case 
typepb.FractionalPercent_HUNDRED: - denominator = 100 - case typepb.FractionalPercent_TEN_THOUSAND: - denominator = 10000 - case typepb.FractionalPercent_MILLION: - denominator = 1000000 - } - return xdsclient.OverloadDropConfig{ - Category: dropPolicy.GetCategory(), - Numerator: numerator, - Denominator: denominator, - } -} - -func parseEndpoints(lbEndpoints []*endpointpb.LbEndpoint) []xdsclient.Endpoint { - endpoints := make([]xdsclient.Endpoint, 0, len(lbEndpoints)) - for _, lbEndpoint := range lbEndpoints { - endpoints = append(endpoints, xdsclient.Endpoint{ - HealthStatus: xdsclient.EndpointHealthStatus(lbEndpoint.GetHealthStatus()), - Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), - Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), - }) - } - return endpoints -} diff --git a/xds/internal/balancer/edsbalancer/load_store_wrapper.go b/xds/internal/balancer/edsbalancer/load_store_wrapper.go deleted file mode 100644 index 18904e47a42e..000000000000 --- a/xds/internal/balancer/edsbalancer/load_store_wrapper.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package edsbalancer - -import ( - "sync" - - "google.golang.org/grpc/xds/internal/client/load" -) - -type loadStoreWrapper struct { - mu sync.RWMutex - service string - // Both store and perCluster will be nil if load reporting is disabled (EDS - // response doesn't have LRS server name). Note that methods on Store and - // perCluster all handle nil, so there's no need to check nil before calling - // them. - store *load.Store - perCluster load.PerClusterReporter -} - -func (lsw *loadStoreWrapper) updateServiceName(service string) { - lsw.mu.Lock() - defer lsw.mu.Unlock() - if lsw.service == service { - return - } - lsw.service = service - lsw.perCluster = lsw.store.PerCluster(lsw.service, "") -} - -func (lsw *loadStoreWrapper) updateLoadStore(store *load.Store) { - lsw.mu.Lock() - defer lsw.mu.Unlock() - if store == lsw.store { - return - } - lsw.store = store - lsw.perCluster = lsw.store.PerCluster(lsw.service, "") -} - -func (lsw *loadStoreWrapper) CallStarted(locality string) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallStarted(locality) - } -} - -func (lsw *loadStoreWrapper) CallFinished(locality string, err error) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallFinished(locality, err) - } -} - -func (lsw *loadStoreWrapper) CallServerLoad(locality, name string, val float64) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallServerLoad(locality, name, val) - } -} - -func (lsw *loadStoreWrapper) CallDropped(category string) { - lsw.mu.RLock() - defer lsw.mu.RUnlock() - if lsw.perCluster != nil { - lsw.perCluster.CallDropped(category) - } -} diff --git a/xds/internal/balancer/edsbalancer/util_test.go b/xds/internal/balancer/edsbalancer/util_test.go deleted file mode 100644 index 748aeffe2bb9..000000000000 --- a/xds/internal/balancer/edsbalancer/util_test.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2019 gRPC 
authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import ( - "testing" - - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/testutils" -) - -func init() { - newRandomWRR = testutils.NewTestWRR -} - -func (s) TestDropper(t *testing.T) { - const repeat = 2 - - type args struct { - numerator uint32 - denominator uint32 - } - tests := []struct { - name string - args args - }{ - { - name: "2_3", - args: args{ - numerator: 2, - denominator: 3, - }, - }, - { - name: "4_8", - args: args{ - numerator: 4, - denominator: 8, - }, - }, - { - name: "7_20", - args: args{ - numerator: 7, - denominator: 20, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - d := newDropper(xdsclient.OverloadDropConfig{ - Category: "", - Numerator: tt.args.numerator, - Denominator: tt.args.denominator, - }) - - var ( - dCount int - wantCount = int(tt.args.numerator) * repeat - loopCount = int(tt.args.denominator) * repeat - ) - for i := 0; i < loopCount; i++ { - if d.drop() { - dCount++ - } - } - - if dCount != (wantCount) { - t.Errorf("with numerator %v, denominator %v repeat %v, got drop count: %v, want %v", - tt.args.numerator, tt.args.denominator, repeat, dCount, wantCount) - } - }) - } -} diff --git a/xds/internal/balancer/edsbalancer/xds_lrs_test.go b/xds/internal/balancer/edsbalancer/xds_lrs_test.go deleted file mode 100644 index 9f93e0b42f08..000000000000 --- 
a/xds/internal/balancer/edsbalancer/xds_lrs_test.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package edsbalancer - -import ( - "context" - "testing" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/xds/internal/testutils/fakeclient" -) - -// TestXDSLoadReporting verifies that the edsBalancer starts the loadReport -// stream when the lbConfig passed to it contains a valid value for the LRS -// server (empty string). 
-func (s) TestXDSLoadReporting(t *testing.T) { - xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() - - builder := balancer.Get(edsName) - edsB := builder.Build(newNoopTestClientConn(), balancer.BuildOptions{}) - if edsB == nil { - t.Fatalf("builder.Build(%s) failed and returned nil", edsName) - } - defer edsB.Close() - - if err := edsB.UpdateClientConnState(balancer.ClientConnState{ - BalancerConfig: &EDSConfig{ - EDSServiceName: testEDSClusterName, - LrsLoadReportingServerName: new(string), - }, - }); err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - gotCluster, err := xdsC.WaitForWatchEDS(ctx) - if err != nil { - t.Fatalf("xdsClient.WatchEndpoints failed with error: %v", err) - } - if gotCluster != testEDSClusterName { - t.Fatalf("xdsClient.WatchEndpoints() called with cluster: %v, want %v", gotCluster, testEDSClusterName) - } - - got, err := xdsC.WaitForReportLoad(ctx) - if err != nil { - t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) - } - if got.Server != "" { - t.Fatalf("xdsClient.ReportLoad called with {%v}: want {\"\"}", got.Server) - } -} diff --git a/xds/internal/balancer/edsbalancer/xds_old.go b/xds/internal/balancer/edsbalancer/xds_old.go deleted file mode 100644 index 6729e6801f15..000000000000 --- a/xds/internal/balancer/edsbalancer/xds_old.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package edsbalancer - -import "google.golang.org/grpc/balancer" - -// The old xds balancer implements logic for both CDS and EDS. With the new -// design, CDS is split and moved to a separate balancer, and the xds balancer -// becomes the EDS balancer. -// -// To keep the existing tests working, this file regisger EDS balancer under the -// old xds balancer name. -// -// TODO: delete this file when migration to new workflow (LDS, RDS, CDS, EDS) is -// done. - -const xdsName = "xds_experimental" - -func init() { - balancer.Register(&xdsBalancerBuilder{}) -} - -// xdsBalancerBuilder register edsBalancerBuilder (now with name -// "eds_experimental") under the old name "xds_experimental". -type xdsBalancerBuilder struct { - edsBalancerBuilder -} - -func (b *xdsBalancerBuilder) Name() string { - return xdsName -} diff --git a/xds/internal/balancer/loadstore/load_store_wrapper.go b/xds/internal/balancer/loadstore/load_store_wrapper.go index 88fa344118cc..8ce958d71ca8 100644 --- a/xds/internal/balancer/loadstore/load_store_wrapper.go +++ b/xds/internal/balancer/loadstore/load_store_wrapper.go @@ -22,7 +22,7 @@ package loadstore import ( "sync" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient/load" ) // NewWrapper creates a Wrapper. diff --git a/xds/internal/balancer/lrs/balancer.go b/xds/internal/balancer/lrs/balancer.go deleted file mode 100644 index ab9ee7109db1..000000000000 --- a/xds/internal/balancer/lrs/balancer.go +++ /dev/null @@ -1,246 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package lrs implements load reporting balancer for xds. -package lrs - -import ( - "encoding/json" - "fmt" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/loadstore" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/load" -) - -func init() { - balancer.Register(&lrsBB{}) -} - -var newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } - -const lrsBalancerName = "lrs_experimental" - -type lrsBB struct{} - -func (l *lrsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - b := &lrsBalancer{ - cc: cc, - buildOpts: opts, - } - b.logger = prefixLogger(b) - b.logger.Infof("Created") - - client, err := newXDSClient() - if err != nil { - b.logger.Errorf("failed to create xds-client: %v", err) - return nil - } - b.client = newXDSClientWrapper(client) - - return b -} - -func (l *lrsBB) Name() string { - return lrsBalancerName -} - -func (l *lrsBB) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - return parseConfig(c) -} - -type lrsBalancer struct { - cc balancer.ClientConn - buildOpts balancer.BuildOptions - - logger *grpclog.PrefixLogger - client *xdsClientWrapper - - config *lbConfig - lb balancer.Balancer // The sub balancer. 
-} - -func (b *lrsBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - newConfig, ok := s.BalancerConfig.(*lbConfig) - if !ok { - return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) - } - - // Update load reporting config or xds client. This needs to be done before - // updating the child policy because we need the loadStore from the updated - // client to be passed to the ccWrapper. - if err := b.client.update(newConfig); err != nil { - return err - } - - // If child policy is a different type, recreate the sub-balancer. - if b.config == nil || b.config.ChildPolicy.Name != newConfig.ChildPolicy.Name { - bb := balancer.Get(newConfig.ChildPolicy.Name) - if bb == nil { - return fmt.Errorf("balancer %q not registered", newConfig.ChildPolicy.Name) - } - if b.lb != nil { - b.lb.Close() - } - lidJSON, err := newConfig.Locality.ToString() - if err != nil { - return fmt.Errorf("failed to marshal LocalityID: %#v", newConfig.Locality) - } - ccWrapper := newCCWrapper(b.cc, b.client.loadStore(), lidJSON) - b.lb = bb.Build(ccWrapper, b.buildOpts) - } - b.config = newConfig - - // Addresses and sub-balancer config are sent to sub-balancer. 
- return b.lb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: s.ResolverState, - BalancerConfig: b.config.ChildPolicy.Config, - }) -} - -func (b *lrsBalancer) ResolverError(err error) { - if b.lb != nil { - b.lb.ResolverError(err) - } -} - -func (b *lrsBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { - if b.lb != nil { - b.lb.UpdateSubConnState(sc, s) - } -} - -func (b *lrsBalancer) Close() { - if b.lb != nil { - b.lb.Close() - b.lb = nil - } - b.client.close() -} - -type ccWrapper struct { - balancer.ClientConn - loadStore load.PerClusterReporter - localityIDJSON string -} - -func newCCWrapper(cc balancer.ClientConn, loadStore load.PerClusterReporter, localityIDJSON string) *ccWrapper { - return &ccWrapper{ - ClientConn: cc, - loadStore: loadStore, - localityIDJSON: localityIDJSON, - } -} - -func (ccw *ccWrapper) UpdateState(s balancer.State) { - s.Picker = newLoadReportPicker(s.Picker, ccw.localityIDJSON, ccw.loadStore) - ccw.ClientConn.UpdateState(s) -} - -// xdsClientInterface contains only the xds_client methods needed by LRS -// balancer. It's defined so we can override xdsclient in tests. -type xdsClientInterface interface { - ReportLoad(server string) (*load.Store, func()) - Close() -} - -type xdsClientWrapper struct { - c xdsClientInterface - cancelLoadReport func() - clusterName string - edsServiceName string - lrsServerName string - // loadWrapper is a wrapper with loadOriginal, with clusterName and - // edsServiceName. It's used children to report loads. - loadWrapper *loadstore.Wrapper -} - -func newXDSClientWrapper(c xdsClientInterface) *xdsClientWrapper { - return &xdsClientWrapper{ - c: c, - loadWrapper: loadstore.NewWrapper(), - } -} - -// update checks the config and xdsclient, and decides whether it needs to -// restart the load reporting stream. 
-func (w *xdsClientWrapper) update(newConfig *lbConfig) error { - var ( - restartLoadReport bool - updateLoadClusterAndService bool - ) - - // ClusterName is different, restart. ClusterName is from ClusterName and - // EdsServiceName. - if w.clusterName != newConfig.ClusterName { - updateLoadClusterAndService = true - w.clusterName = newConfig.ClusterName - } - if w.edsServiceName != newConfig.EdsServiceName { - updateLoadClusterAndService = true - w.edsServiceName = newConfig.EdsServiceName - } - - if updateLoadClusterAndService { - // This updates the clusterName and serviceName that will reported for the - // loads. The update here is too early, the perfect timing is when the - // picker is updated with the new connection. But from this balancer's point - // of view, it's impossible to tell. - // - // On the other hand, this will almost never happen. Each LRS policy - // shouldn't get updated config. The parent should do a graceful switch when - // the clusterName or serviceName is changed. - w.loadWrapper.UpdateClusterAndService(w.clusterName, w.edsServiceName) - } - - if w.lrsServerName != newConfig.LrsLoadReportingServerName { - // LrsLoadReportingServerName is different, load should be report to a - // different server, restart. 
- restartLoadReport = true - w.lrsServerName = newConfig.LrsLoadReportingServerName - } - - if restartLoadReport { - if w.cancelLoadReport != nil { - w.cancelLoadReport() - w.cancelLoadReport = nil - } - var loadStore *load.Store - if w.c != nil { - loadStore, w.cancelLoadReport = w.c.ReportLoad(w.lrsServerName) - } - w.loadWrapper.UpdateLoadStore(loadStore) - } - - return nil -} - -func (w *xdsClientWrapper) loadStore() load.PerClusterReporter { - return w.loadWrapper -} - -func (w *xdsClientWrapper) close() { - if w.cancelLoadReport != nil { - w.cancelLoadReport() - w.cancelLoadReport = nil - } - w.c.Close() -} diff --git a/xds/internal/balancer/lrs/balancer_test.go b/xds/internal/balancer/lrs/balancer_test.go deleted file mode 100644 index 0b575b112104..000000000000 --- a/xds/internal/balancer/lrs/balancer_test.go +++ /dev/null @@ -1,144 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package lrs - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/connectivity" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/resolver" - xdsinternal "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/fakeclient" -) - -const defaultTestTimeout = 1 * time.Second - -var ( - testBackendAddrs = []resolver.Address{ - {Addr: "1.1.1.1:1"}, - } - testLocality = &xdsinternal.LocalityID{ - Region: "test-region", - Zone: "test-zone", - SubZone: "test-sub-zone", - } -) - -// TestLoadReporting verifies that the lrs balancer starts the loadReport -// stream when the lbConfig passed to it contains a valid value for the LRS -// server (empty string). -func TestLoadReporting(t *testing.T) { - xdsC := fakeclient.NewClient() - oldNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { return xdsC, nil } - defer func() { newXDSClient = oldNewXDSClient }() - - builder := balancer.Get(lrsBalancerName) - cc := testutils.NewTestClientConn(t) - lrsB := builder.Build(cc, balancer.BuildOptions{}) - defer lrsB.Close() - - if err := lrsB.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{ - Addresses: testBackendAddrs, - }, - BalancerConfig: &lbConfig{ - ClusterName: testClusterName, - EdsServiceName: testServiceName, - LrsLoadReportingServerName: testLRSServerName, - Locality: testLocality, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - }, - }, - }); err != nil { - t.Fatalf("unexpected error from UpdateClientConnState: %v", err) - } - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - got, err := xdsC.WaitForReportLoad(ctx) - if err != nil { - 
t.Fatalf("xdsClient.ReportLoad failed with error: %v", err) - } - if got.Server != testLRSServerName { - t.Fatalf("xdsClient.ReportLoad called with {%q}: want {%q}", got.Server, testLRSServerName) - } - - sc1 := <-cc.NewSubConnCh - lrsB.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - lrsB.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with one backend. - p1 := <-cc.NewPickerCh - const successCount = 5 - for i := 0; i < successCount; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - gotSCSt.Done(balancer.DoneInfo{}) - } - const errorCount = 5 - for i := 0; i < errorCount; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - gotSCSt.Done(balancer.DoneInfo{Err: fmt.Errorf("error")}) - } - - // Dump load data from the store and compare with expected counts. 
- loadStore := xdsC.LoadStore() - if loadStore == nil { - t.Fatal("loadStore is nil in xdsClient") - } - sds := loadStore.Stats([]string{testClusterName}) - if len(sds) == 0 { - t.Fatalf("loads for cluster %v not found in store", testClusterName) - } - sd := sds[0] - if sd.Cluster != testClusterName || sd.Service != testServiceName { - t.Fatalf("got unexpected load for %q, %q, want %q, %q", sd.Cluster, sd.Service, testClusterName, testServiceName) - } - testLocalityJSON, _ := testLocality.ToString() - localityData, ok := sd.LocalityStats[testLocalityJSON] - if !ok { - t.Fatalf("loads for %v not found in store", testLocality) - } - reqStats := localityData.RequestStats - if reqStats.Succeeded != successCount { - t.Errorf("got succeeded %v, want %v", reqStats.Succeeded, successCount) - } - if reqStats.Errored != errorCount { - t.Errorf("got errord %v, want %v", reqStats.Errored, errorCount) - } - if reqStats.InProgress != 0 { - t.Errorf("got inProgress %v, want %v", reqStats.InProgress, 0) - } -} diff --git a/xds/internal/balancer/lrs/config.go b/xds/internal/balancer/lrs/config.go deleted file mode 100644 index 3d39961401b5..000000000000 --- a/xds/internal/balancer/lrs/config.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package lrs - -import ( - "encoding/json" - "fmt" - - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal" -) - -type lbConfig struct { - serviceconfig.LoadBalancingConfig - ClusterName string - EdsServiceName string - LrsLoadReportingServerName string - Locality *internal.LocalityID - ChildPolicy *internalserviceconfig.BalancerConfig -} - -func parseConfig(c json.RawMessage) (*lbConfig, error) { - var cfg lbConfig - if err := json.Unmarshal(c, &cfg); err != nil { - return nil, err - } - if cfg.ClusterName == "" { - return nil, fmt.Errorf("required ClusterName is not set in %+v", cfg) - } - if cfg.LrsLoadReportingServerName == "" { - return nil, fmt.Errorf("required LrsLoadReportingServerName is not set in %+v", cfg) - } - if cfg.Locality == nil { - return nil, fmt.Errorf("required Locality is not set in %+v", cfg) - } - return &cfg, nil -} diff --git a/xds/internal/balancer/lrs/config_test.go b/xds/internal/balancer/lrs/config_test.go deleted file mode 100644 index f49430569fed..000000000000 --- a/xds/internal/balancer/lrs/config_test.go +++ /dev/null @@ -1,127 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package lrs - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/balancer/roundrobin" - internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" - xdsinternal "google.golang.org/grpc/xds/internal" -) - -const ( - testClusterName = "test-cluster" - testServiceName = "test-eds-service" - testLRSServerName = "test-lrs-name" -) - -func TestParseConfig(t *testing.T) { - tests := []struct { - name string - js string - want *lbConfig - wantErr bool - }{ - { - name: "no cluster name", - js: `{ - "edsServiceName": "test-eds-service", - "lrsLoadReportingServerName": "test-lrs-name", - "locality": { - "region": "test-region", - "zone": "test-zone", - "subZone": "test-sub-zone" - }, - "childPolicy":[{"round_robin":{}}] -} - `, - wantErr: true, - }, - { - name: "no LRS server name", - js: `{ - "clusterName": "test-cluster", - "edsServiceName": "test-eds-service", - "locality": { - "region": "test-region", - "zone": "test-zone", - "subZone": "test-sub-zone" - }, - "childPolicy":[{"round_robin":{}}] -} - `, - wantErr: true, - }, - { - name: "no locality", - js: `{ - "clusterName": "test-cluster", - "edsServiceName": "test-eds-service", - "lrsLoadReportingServerName": "test-lrs-name", - "childPolicy":[{"round_robin":{}}] -} - `, - wantErr: true, - }, - { - name: "good", - js: `{ - "clusterName": "test-cluster", - "edsServiceName": "test-eds-service", - "lrsLoadReportingServerName": "test-lrs-name", - "locality": { - "region": "test-region", - "zone": "test-zone", - "subZone": "test-sub-zone" - }, - "childPolicy":[{"round_robin":{}}] -} - `, - want: &lbConfig{ - ClusterName: testClusterName, - EdsServiceName: testServiceName, - LrsLoadReportingServerName: testLRSServerName, - Locality: &xdsinternal.LocalityID{ - Region: "test-region", - Zone: "test-zone", - SubZone: "test-sub-zone", - }, - ChildPolicy: &internalserviceconfig.BalancerConfig{ - Name: roundrobin.Name, - Config: nil, - }, - }, - wantErr: false, - }, 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := parseConfig([]byte(tt.js)) - if (err != nil) != tt.wantErr { - t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) - return - } - if diff := cmp.Diff(got, tt.want); diff != "" { - t.Errorf("parseConfig() got = %v, want %v, diff: %s", got, tt.want, diff) - } - }) - } -} diff --git a/xds/internal/balancer/lrs/picker.go b/xds/internal/balancer/lrs/picker.go deleted file mode 100644 index 1e4ad156e5b7..000000000000 --- a/xds/internal/balancer/lrs/picker.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package lrs - -import ( - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" - "google.golang.org/grpc/balancer" -) - -const ( - serverLoadCPUName = "cpu_utilization" - serverLoadMemoryName = "mem_utilization" -) - -// loadReporter wraps the methods from the loadStore that are used here. 
-type loadReporter interface { - CallStarted(locality string) - CallFinished(locality string, err error) - CallServerLoad(locality, name string, val float64) -} - -type loadReportPicker struct { - p balancer.Picker - - locality string - loadStore loadReporter -} - -func newLoadReportPicker(p balancer.Picker, id string, loadStore loadReporter) *loadReportPicker { - return &loadReportPicker{ - p: p, - locality: id, - loadStore: loadStore, - } -} - -func (lrp *loadReportPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - res, err := lrp.p.Pick(info) - if err != nil { - return res, err - } - - if lrp.loadStore == nil { - return res, err - } - - lrp.loadStore.CallStarted(lrp.locality) - oldDone := res.Done - res.Done = func(info balancer.DoneInfo) { - if oldDone != nil { - oldDone(info) - } - lrp.loadStore.CallFinished(lrp.locality, info.Err) - - load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport) - if !ok { - return - } - lrp.loadStore.CallServerLoad(lrp.locality, serverLoadCPUName, load.CpuUtilization) - lrp.loadStore.CallServerLoad(lrp.locality, serverLoadMemoryName, load.MemUtilization) - for n, d := range load.RequestCost { - lrp.loadStore.CallServerLoad(lrp.locality, n, d) - } - for n, d := range load.Utilization { - lrp.loadStore.CallServerLoad(lrp.locality, n, d) - } - } - return res, err -} diff --git a/xds/internal/balancer/orca/orca.go b/xds/internal/balancer/orca/orca.go deleted file mode 100644 index 28016806eec4..000000000000 --- a/xds/internal/balancer/orca/orca.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package orca implements Open Request Cost Aggregation. -package orca - -import ( - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/balancerload" - "google.golang.org/grpc/metadata" -) - -const mdKey = "X-Endpoint-Load-Metrics-Bin" - -var logger = grpclog.Component("xds") - -// toBytes converts a orca load report into bytes. -func toBytes(r *orcapb.OrcaLoadReport) []byte { - if r == nil { - return nil - } - - b, err := proto.Marshal(r) - if err != nil { - logger.Warningf("orca: failed to marshal load report: %v", err) - return nil - } - return b -} - -// ToMetadata converts a orca load report into grpc metadata. -func ToMetadata(r *orcapb.OrcaLoadReport) metadata.MD { - b := toBytes(r) - if b == nil { - return nil - } - return metadata.Pairs(mdKey, string(b)) -} - -// fromBytes reads load report bytes and converts it to orca. -func fromBytes(b []byte) *orcapb.OrcaLoadReport { - ret := new(orcapb.OrcaLoadReport) - if err := proto.Unmarshal(b, ret); err != nil { - logger.Warningf("orca: failed to unmarshal load report: %v", err) - return nil - } - return ret -} - -// FromMetadata reads load report from metadata and converts it to orca. -// -// It returns nil if report is not found in metadata. 
-func FromMetadata(md metadata.MD) *orcapb.OrcaLoadReport { - vs := md.Get(mdKey) - if len(vs) == 0 { - return nil - } - return fromBytes([]byte(vs[0])) -} - -type loadParser struct{} - -func (*loadParser) Parse(md metadata.MD) interface{} { - return FromMetadata(md) -} - -func init() { - balancerload.SetParser(&loadParser{}) -} diff --git a/xds/internal/balancer/orca/orca_test.go b/xds/internal/balancer/orca/orca_test.go deleted file mode 100644 index d7a44134e22b..000000000000 --- a/xds/internal/balancer/orca/orca_test.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package orca - -import ( - "strings" - "testing" - - orcapb "github.com/cncf/udpa/go/udpa/data/orca/v1" - "github.com/golang/protobuf/proto" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/metadata" -) - -var ( - testMessage = &orcapb.OrcaLoadReport{ - CpuUtilization: 0.1, - MemUtilization: 0.2, - RequestCost: map[string]float64{"ccc": 3.4}, - Utilization: map[string]float64{"ttt": 0.4}, - } - testBytes, _ = proto.Marshal(testMessage) -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -func (s) TestToMetadata(t *testing.T) { - tests := []struct { - name string - r *orcapb.OrcaLoadReport - want metadata.MD - }{{ - name: "nil", - r: nil, - want: nil, - }, { - name: "valid", - r: testMessage, - want: metadata.MD{ - strings.ToLower(mdKey): []string{string(testBytes)}, - }, - }} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := ToMetadata(tt.r); !cmp.Equal(got, tt.want) { - t.Errorf("ToMetadata() = %v, want %v", got, tt.want) - } - }) - } -} - -func (s) TestFromMetadata(t *testing.T) { - tests := []struct { - name string - md metadata.MD - want *orcapb.OrcaLoadReport - }{{ - name: "nil", - md: nil, - want: nil, - }, { - name: "valid", - md: metadata.MD{ - strings.ToLower(mdKey): []string{string(testBytes)}, - }, - want: testMessage, - }} - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := FromMetadata(tt.md); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { - t.Errorf("FromMetadata() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/xds/internal/balancer/outlierdetection/balancer.go b/xds/internal/balancer/outlierdetection/balancer.go new file mode 100644 index 000000000000..eaf4f7fc9ab7 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/balancer.go @@ -0,0 +1,930 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package outlierdetection provides an implementation of the outlier detection +// LB policy, as defined in +// https://github.com/grpc/proposal/blob/master/A50-xds-outlier-detection.md. +package outlierdetection + +import ( + "encoding/json" + "fmt" + "math" + "strings" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Globals to stub out in tests. +var ( + afterFunc = time.AfterFunc + now = time.Now +) + +// Name is the name of the outlier detection balancer. 
+const Name = "outlier_detection_experimental" + +func init() { + if envconfig.XDSOutlierDetection { + balancer.Register(bb{}) + } +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &outlierDetectionBalancer{ + cc: cc, + closed: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + addrs: make(map[string]*addressInfo), + scWrappers: make(map[balancer.SubConn]*subConnWrapper), + scUpdateCh: buffer.NewUnbounded(), + pickerUpdateCh: buffer.NewUnbounded(), + channelzParentID: bOpts.ChannelzParentID, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + b.child = gracefulswitch.NewBalancer(b, bOpts) + go b.run() + return b +} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + lbCfg := &LBConfig{ + // Default top layer values as documented in A50. + Interval: iserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + } + + // This unmarshalling handles underlying layers sre and fpe which have their + // own defaults for their fields if either sre or fpe are present. + if err := json.Unmarshal(s, lbCfg); err != nil { // Validates child config if present as well. + return nil, fmt.Errorf("xds: unable to unmarshal LBconfig: %s, error: %v", string(s), err) + } + + // Note: in the xds flow, these validations will never fail. The xdsclient + // performs the same validations as here on the xds Outlier Detection + // resource before parsing resource into JSON which this function gets + // called with. A50 defines two separate places for these validations to + // take place, the xdsclient and this ParseConfig method. "When parsing a + // config from JSON, if any of these requirements is violated, that should + // be treated as a parsing error." 
- A50 + switch { + // "The google.protobuf.Duration fields interval, base_ejection_time, and + // max_ejection_time must obey the restrictions in the + // google.protobuf.Duration documentation and they must have non-negative + // values." - A50 + // Approximately 290 years is the maximum time that time.Duration (int64) + // can represent. The restrictions on the protobuf.Duration field are to be + // within +-10000 years. Thus, just check for negative values. + case lbCfg.Interval < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.interval = %s; must be >= 0", lbCfg.Interval) + case lbCfg.BaseEjectionTime < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.base_ejection_time = %s; must be >= 0", lbCfg.BaseEjectionTime) + case lbCfg.MaxEjectionTime < 0: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_time = %s; must be >= 0", lbCfg.MaxEjectionTime) + + // "The fields max_ejection_percent, + // success_rate_ejection.enforcement_percentage, + // failure_percentage_ejection.threshold, and + // failure_percentage.enforcement_percentage must have values less than or + // equal to 100." 
- A50 + case lbCfg.MaxEjectionPercent > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.max_ejection_percent = %v; must be <= 100", lbCfg.MaxEjectionPercent) + case lbCfg.SuccessRateEjection != nil && lbCfg.SuccessRateEjection.EnforcementPercentage > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage = %v; must be <= 100", lbCfg.SuccessRateEjection.EnforcementPercentage) + case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.Threshold > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold = %v; must be <= 100", lbCfg.FailurePercentageEjection.Threshold) + case lbCfg.FailurePercentageEjection != nil && lbCfg.FailurePercentageEjection.EnforcementPercentage > 100: + return nil, fmt.Errorf("OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage = %v; must be <= 100", lbCfg.FailurePercentageEjection.EnforcementPercentage) + } + return lbCfg, nil +} + +func (bb) Name() string { + return Name +} + +// scUpdate wraps a subConn update to be sent to the child balancer. +type scUpdate struct { + scw *subConnWrapper + state balancer.SubConnState +} + +type ejectionUpdate struct { + scw *subConnWrapper + isEjected bool // true for ejected, false for unejected +} + +type lbCfgUpdate struct { + lbCfg *LBConfig + // to make sure picker is updated synchronously. + done chan struct{} +} + +type outlierDetectionBalancer struct { + // These fields are safe to be accessed without holding any mutex because + // they are synchronized in run(), which makes these field accesses happen + // serially. + // + // childState is the latest balancer state received from the child. + childState balancer.State + // recentPickerNoop represents whether the most recent picker sent upward to + // the balancer.ClientConn is a noop picker, which doesn't count RPC's. Used + // to suppress redundant picker updates. 
+ recentPickerNoop bool + + closed *grpcsync.Event + done *grpcsync.Event + cc balancer.ClientConn + logger *grpclog.PrefixLogger + channelzParentID *channelz.Identifier + + // childMu guards calls into child (to uphold the balancer.Balancer API + // guarantee of synchronous calls). + childMu sync.Mutex + child *gracefulswitch.Balancer + + // mu guards access to the following fields. It also helps to synchronize + // behaviors of the following events: config updates, firing of the interval + // timer, SubConn State updates, SubConn address updates, and child state + // updates. + // + // For example, when we receive a config update in the middle of the + // interval timer algorithm, which uses knobs present in the config, the + // balancer will wait for the interval timer algorithm to finish before + // persisting the new configuration. + // + // Another example would be the updating of the addrs map, such as from a + // SubConn address update in the middle of the interval timer algorithm + // which uses addrs. This balancer waits for the interval timer algorithm to + // finish before making the update to the addrs map. + // + // This mutex is never held at the same time as childMu (within the context + // of a single goroutine). + mu sync.Mutex + addrs map[string]*addressInfo + cfg *LBConfig + scWrappers map[balancer.SubConn]*subConnWrapper + timerStartTime time.Time + intervalTimer *time.Timer + inhibitPickerUpdates bool + updateUnconditionally bool + numAddrsEjected int // For fast calculations of percentage of addrs ejected + + scUpdateCh *buffer.Unbounded + pickerUpdateCh *buffer.Unbounded +} + +// noopConfig returns whether this balancer is configured with a logical no-op +// configuration or not. +// +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) noopConfig() bool { + return b.cfg.SuccessRateEjection == nil && b.cfg.FailurePercentageEjection == nil +} + +// onIntervalConfig handles logic required specifically on the receipt of a +// configuration which specifies to count RPC's and periodically perform passive +// health checking based on heuristics defined in configuration every configured +// interval. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) onIntervalConfig() { + var interval time.Duration + if b.timerStartTime.IsZero() { + b.timerStartTime = time.Now() + for _, addrInfo := range b.addrs { + addrInfo.callCounter.clear() + } + interval = time.Duration(b.cfg.Interval) + } else { + interval = time.Duration(b.cfg.Interval) - now().Sub(b.timerStartTime) + if interval < 0 { + interval = 0 + } + } + b.intervalTimer = afterFunc(interval, b.intervalTimerAlgorithm) +} + +// onNoopConfig handles logic required specifically on the receipt of a +// configuration which specifies the balancer to be a noop. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) onNoopConfig() { + // "If a config is provided with both the `success_rate_ejection` and + // `failure_percentage_ejection` fields unset, skip starting the timer and + // do the following:" + // "Unset the timer start timestamp." + b.timerStartTime = time.Time{} + for _, addrInfo := range b.addrs { + // "Uneject all currently ejected addresses." + if !addrInfo.latestEjectionTimestamp.IsZero() { + b.unejectAddress(addrInfo) + } + // "Reset each address's ejection time multiplier to 0." 
+ addrInfo.ejectionTimeMultiplier = 0 + } +} + +func (b *outlierDetectionBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + lbCfg, ok := s.BalancerConfig.(*LBConfig) + if !ok { + b.logger.Errorf("received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig) + return balancer.ErrBadResolverState + } + + // Reject whole config if child policy doesn't exist, don't persist it for + // later. + bb := balancer.Get(lbCfg.ChildPolicy.Name) + if bb == nil { + return fmt.Errorf("outlier detection: child balancer %q not registered", lbCfg.ChildPolicy.Name) + } + + // It is safe to read b.cfg here without holding the mutex, as the only + // write to b.cfg happens later in this function. This function is part of + // the balancer.Balancer API, so it is guaranteed to be called in a + // synchronous manner, so it cannot race with this read. + if b.cfg == nil || b.cfg.ChildPolicy.Name != lbCfg.ChildPolicy.Name { + b.childMu.Lock() + err := b.child.SwitchTo(bb) + if err != nil { + b.childMu.Unlock() + return fmt.Errorf("outlier detection: error switching to child of type %q: %v", lbCfg.ChildPolicy.Name, err) + } + b.childMu.Unlock() + } + + b.mu.Lock() + // Inhibit child picker updates until this UpdateClientConnState() call + // completes. If needed, a picker update containing the no-op config bit + // determined from this config and most recent state from the child will be + // sent synchronously upward at the end of this UpdateClientConnState() + // call. 
+ b.inhibitPickerUpdates = true + b.updateUnconditionally = false + b.cfg = lbCfg + + addrs := make(map[string]bool, len(s.ResolverState.Addresses)) + for _, addr := range s.ResolverState.Addresses { + addrs[addr.Addr] = true + if _, ok := b.addrs[addr.Addr]; !ok { + b.addrs[addr.Addr] = newAddressInfo() + } + } + for addr := range b.addrs { + if !addrs[addr] { + delete(b.addrs, addr) + } + } + + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } + + if b.noopConfig() { + b.onNoopConfig() + } else { + b.onIntervalConfig() + } + b.mu.Unlock() + + b.childMu.Lock() + err := b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: b.cfg.ChildPolicy.Config, + }) + b.childMu.Unlock() + + done := make(chan struct{}) + b.pickerUpdateCh.Put(lbCfgUpdate{ + lbCfg: lbCfg, + done: done, + }) + <-done + + return err +} + +func (b *outlierDetectionBalancer) ResolverError(err error) { + b.childMu.Lock() + defer b.childMu.Unlock() + b.child.ResolverError(err) +} + +func (b *outlierDetectionBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + scw, ok := b.scWrappers[sc] + if !ok { + // Shouldn't happen if passed down a SubConnWrapper to child on SubConn + // creation. 
+ b.logger.Errorf("UpdateSubConnState called with SubConn that has no corresponding SubConnWrapper") + return + } + if state.ConnectivityState == connectivity.Shutdown { + delete(b.scWrappers, scw.SubConn) + } + b.scUpdateCh.Put(&scUpdate{ + scw: scw, + state: state, + }) +} + +func (b *outlierDetectionBalancer) Close() { + b.closed.Fire() + <-b.done.Done() + b.childMu.Lock() + b.child.Close() + b.childMu.Unlock() + + b.scUpdateCh.Close() + b.pickerUpdateCh.Close() + + b.mu.Lock() + defer b.mu.Unlock() + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } +} + +func (b *outlierDetectionBalancer) ExitIdle() { + b.childMu.Lock() + defer b.childMu.Unlock() + b.child.ExitIdle() +} + +// wrappedPicker delegates to the child policy's picker, and when the request +// finishes, it increments the corresponding counter in the map entry referenced +// by the subConnWrapper that was picked. If both the `success_rate_ejection` +// and `failure_percentage_ejection` fields are unset in the configuration, this +// picker will not count. +type wrappedPicker struct { + childPicker balancer.Picker + noopPicker bool +} + +func (wp *wrappedPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + pr, err := wp.childPicker.Pick(info) + if err != nil { + return balancer.PickResult{}, err + } + + done := func(di balancer.DoneInfo) { + if !wp.noopPicker { + incrementCounter(pr.SubConn, di) + } + if pr.Done != nil { + pr.Done(di) + } + } + scw, ok := pr.SubConn.(*subConnWrapper) + if !ok { + // This can never happen, but check is present for defensive + // programming. 
+ logger.Errorf("Picked SubConn from child picker is not a SubConnWrapper") + return balancer.PickResult{ + SubConn: pr.SubConn, + Done: done, + Metadata: pr.Metadata, + }, nil + } + return balancer.PickResult{ + SubConn: scw.SubConn, + Done: done, + Metadata: pr.Metadata, + }, nil +} + +func incrementCounter(sc balancer.SubConn, info balancer.DoneInfo) { + scw, ok := sc.(*subConnWrapper) + if !ok { + // Shouldn't happen, as comes from child + return + } + + // scw.addressInfo and callCounter.activeBucket can be written to + // concurrently (the pointers themselves). Thus, protect the reads here with + // atomics to prevent data corruption. There exists a race in which you read + // the addressInfo or active bucket pointer and then that pointer points to + // deprecated memory. If this goroutine yields the processor, in between + // reading the addressInfo pointer and writing to the active bucket, + // UpdateAddresses can switch the addressInfo the scw points to. Writing to + // an outdated addresses is a very small race and tolerable. After reading + // callCounter.activeBucket in this picker a swap call can concurrently + // change what activeBucket points to. A50 says to swap the pointer, which + // will cause this race to write to deprecated memory the interval timer + // algorithm will never read, which makes this race alright. 
+ addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo == nil { + return + } + ab := (*bucket)(atomic.LoadPointer(&addrInfo.callCounter.activeBucket)) + + if info.Err == nil { + atomic.AddUint32(&ab.numSuccesses, 1) + } else { + atomic.AddUint32(&ab.numFailures, 1) + } +} + +func (b *outlierDetectionBalancer) UpdateState(s balancer.State) { + b.pickerUpdateCh.Put(s) +} + +func (b *outlierDetectionBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + sc, err := b.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + scw := &subConnWrapper{ + SubConn: sc, + addresses: addrs, + scUpdateCh: b.scUpdateCh, + } + b.mu.Lock() + defer b.mu.Unlock() + b.scWrappers[sc] = scw + if len(addrs) != 1 { + return scw, nil + } + addrInfo, ok := b.addrs[addrs[0].Addr] + if !ok { + return scw, nil + } + addrInfo.sws = append(addrInfo.sws, scw) + atomic.StorePointer(&scw.addressInfo, unsafe.Pointer(addrInfo)) + if !addrInfo.latestEjectionTimestamp.IsZero() { + scw.eject() + } + return scw, nil +} + +func (b *outlierDetectionBalancer) RemoveSubConn(sc balancer.SubConn) { + scw, ok := sc.(*subConnWrapper) + if !ok { // Shouldn't happen + return + } + // Remove the wrapped SubConn from the parent Client Conn. We don't remove + // from map entry until we get a Shutdown state for the SubConn, as we need + // that data to forward that state down. + b.cc.RemoveSubConn(scw.SubConn) +} + +// appendIfPresent appends the scw to the address, if the address is present in +// the Outlier Detection balancers address map. Returns nil if not present, and +// the map entry if present. +// +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) appendIfPresent(addr string, scw *subConnWrapper) *addressInfo { + addrInfo, ok := b.addrs[addr] + if !ok { + return nil + } + + addrInfo.sws = append(addrInfo.sws, scw) + atomic.StorePointer(&scw.addressInfo, unsafe.Pointer(addrInfo)) + return addrInfo +} + +// removeSubConnFromAddressesMapEntry removes the scw from its map entry if +// present. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) removeSubConnFromAddressesMapEntry(scw *subConnWrapper) { + addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo == nil { + return + } + for i, sw := range addrInfo.sws { + if scw == sw { + addrInfo.sws = append(addrInfo.sws[:i], addrInfo.sws[i+1:]...) + return + } + } +} + +func (b *outlierDetectionBalancer) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + scw, ok := sc.(*subConnWrapper) + if !ok { + // Return, shouldn't happen if passed up scw + return + } + + b.cc.UpdateAddresses(scw.SubConn, addrs) + b.mu.Lock() + defer b.mu.Unlock() + + // Note that 0 addresses is a valid update/state for a SubConn to be in. + // This is correctly handled by this algorithm (handled as part of a non singular + // old address/new address). + switch { + case len(scw.addresses) == 1 && len(addrs) == 1: // single address to single address + // If the updated address is the same, then there is nothing to do + // past this point. 
+ if scw.addresses[0].Addr == addrs[0].Addr { + return + } + b.removeSubConnFromAddressesMapEntry(scw) + addrInfo := b.appendIfPresent(addrs[0].Addr, scw) + if addrInfo == nil { // uneject unconditionally because could have come from an ejected address + scw.uneject() + break + } + if addrInfo.latestEjectionTimestamp.IsZero() { // relay new updated subconn state + scw.uneject() + } else { + scw.eject() + } + case len(scw.addresses) == 1: // single address to multiple/no addresses + b.removeSubConnFromAddressesMapEntry(scw) + addrInfo := (*addressInfo)(atomic.LoadPointer(&scw.addressInfo)) + if addrInfo != nil { + addrInfo.callCounter.clear() + } + scw.uneject() + case len(addrs) == 1: // multiple/no addresses to single address + addrInfo := b.appendIfPresent(addrs[0].Addr, scw) + if addrInfo != nil && !addrInfo.latestEjectionTimestamp.IsZero() { + scw.eject() + } + } // otherwise multiple/no addresses to multiple/no addresses; ignore + + scw.addresses = addrs +} + +func (b *outlierDetectionBalancer) ResolveNow(opts resolver.ResolveNowOptions) { + b.cc.ResolveNow(opts) +} + +func (b *outlierDetectionBalancer) Target() string { + return b.cc.Target() +} + +func max(x, y time.Duration) time.Duration { + if x < y { + return y + } + return x +} + +func min(x, y time.Duration) time.Duration { + if x < y { + return x + } + return y +} + +// handleSubConnUpdate stores the recent state and forward the update +// if the SubConn is not ejected. +func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) { + scw := u.scw + scw.latestState = u.state + if !scw.ejected { + b.childMu.Lock() + b.child.UpdateSubConnState(scw, u.state) + b.childMu.Unlock() + } +} + +// handleEjectedUpdate handles any SubConns that get ejected/unejected, and +// forwards the appropriate corresponding subConnState to the child policy. 
+func (b *outlierDetectionBalancer) handleEjectedUpdate(u *ejectionUpdate) {
+	scw := u.scw
+	scw.ejected = u.isEjected
+	// If scw.latestState has never been written to, it will default to
+	// connectivity IDLE, which is fine.
+	stateToUpdate := scw.latestState
+	if u.isEjected {
+		stateToUpdate = balancer.SubConnState{
+			ConnectivityState: connectivity.TransientFailure,
+		}
+	}
+	b.childMu.Lock()
+	b.child.UpdateSubConnState(scw, stateToUpdate)
+	b.childMu.Unlock()
+}
+
+// handleChildStateUpdate forwards the picker update wrapped in a wrapped picker
+// with the noop picker bit present.
+func (b *outlierDetectionBalancer) handleChildStateUpdate(u balancer.State) {
+	b.childState = u
+	b.mu.Lock()
+	if b.inhibitPickerUpdates {
+		// If a child's state is updated during the suppression of child
+		// updates, the synchronous handleLBConfigUpdate function with respect
+		// to UpdateClientConnState should return a picker unconditionally.
+		b.updateUnconditionally = true
+		b.mu.Unlock()
+		return
+	}
+	noopCfg := b.noopConfig()
+	b.mu.Unlock()
+	b.recentPickerNoop = noopCfg
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.childState.ConnectivityState,
+		Picker: &wrappedPicker{
+			childPicker: b.childState.Picker,
+			noopPicker:  noopCfg,
+		},
+	})
+}
+
+// handleLBConfigUpdate compares whether the new config is a noop config or not,
+// to the noop bit in the picker if present. It updates the picker if this bit
+// changed compared to the picker currently in use.
+func (b *outlierDetectionBalancer) handleLBConfigUpdate(u lbCfgUpdate) {
+	lbCfg := u.lbCfg
+	noopCfg := lbCfg.SuccessRateEjection == nil && lbCfg.FailurePercentageEjection == nil
+	// If the child has sent its first update and this config flips the noop
+	// bit compared to the most recent picker update sent upward, then a new
+	// picker with this updated bit needs to be forwarded upward.
 If a child
+	// update was received during the suppression of child updates within
+	// UpdateClientConnState(), then a new picker needs to be forwarded with
+	// this updated state, regardless of whether this new configuration flips
+	// the bit.
+	if b.childState.Picker != nil && noopCfg != b.recentPickerNoop || b.updateUnconditionally {
+		b.recentPickerNoop = noopCfg
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: b.childState.ConnectivityState,
+			Picker: &wrappedPicker{
+				childPicker: b.childState.Picker,
+				noopPicker:  noopCfg,
+			},
+		})
+	}
+	b.inhibitPickerUpdates = false
+	b.updateUnconditionally = false
+	close(u.done)
+}
+
+func (b *outlierDetectionBalancer) run() {
+	defer b.done.Fire()
+	for {
+		select {
+		case update, ok := <-b.scUpdateCh.Get():
+			if !ok {
+				return
+			}
+			b.scUpdateCh.Load()
+			if b.closed.HasFired() { // don't send SubConn updates to child after the balancer has been closed
+				return
+			}
+			switch u := update.(type) {
+			case *scUpdate:
+				b.handleSubConnUpdate(u)
+			case *ejectionUpdate:
+				b.handleEjectedUpdate(u)
+			}
+		case update, ok := <-b.pickerUpdateCh.Get():
+			if !ok {
+				return
+			}
+			b.pickerUpdateCh.Load()
+			if b.closed.HasFired() { // don't send picker updates to grpc after the balancer has been closed
+				return
+			}
+			switch u := update.(type) {
+			case balancer.State:
+				b.handleChildStateUpdate(u)
+			case lbCfgUpdate:
+				b.handleLBConfigUpdate(u)
+			}
+		case <-b.closed.Done():
+			return
+		}
+	}
+}
+
+// intervalTimerAlgorithm ejects and unejects addresses based on the Outlier
+// Detection configuration and data about each address from the previous
+// interval.
+func (b *outlierDetectionBalancer) intervalTimerAlgorithm() { + b.mu.Lock() + defer b.mu.Unlock() + b.timerStartTime = time.Now() + + for _, addrInfo := range b.addrs { + addrInfo.callCounter.swap() + } + + if b.cfg.SuccessRateEjection != nil { + b.successRateAlgorithm() + } + + if b.cfg.FailurePercentageEjection != nil { + b.failurePercentageAlgorithm() + } + + for _, addrInfo := range b.addrs { + if addrInfo.latestEjectionTimestamp.IsZero() && addrInfo.ejectionTimeMultiplier > 0 { + addrInfo.ejectionTimeMultiplier-- + continue + } + if addrInfo.latestEjectionTimestamp.IsZero() { + // Address is already not ejected, so no need to check for whether + // to uneject the address below. + continue + } + et := time.Duration(b.cfg.BaseEjectionTime) * time.Duration(addrInfo.ejectionTimeMultiplier) + met := max(time.Duration(b.cfg.BaseEjectionTime), time.Duration(b.cfg.MaxEjectionTime)) + uet := addrInfo.latestEjectionTimestamp.Add(min(et, met)) + if now().After(uet) { + b.unejectAddress(addrInfo) + } + } + + // This conditional only for testing (since the interval timer algorithm is + // called manually), will never hit in production. + if b.intervalTimer != nil { + b.intervalTimer.Stop() + } + b.intervalTimer = afterFunc(time.Duration(b.cfg.Interval), b.intervalTimerAlgorithm) +} + +// addrsWithAtLeastRequestVolume returns a slice of address information of all +// addresses with at least request volume passed in. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) addrsWithAtLeastRequestVolume(requestVolume uint32) []*addressInfo { + var addrs []*addressInfo + for _, addrInfo := range b.addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + if rv >= requestVolume { + addrs = append(addrs, addrInfo) + } + } + return addrs +} + +// meanAndStdDev returns the mean and std dev of the fractions of successful +// requests of the addresses passed in. +// +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) meanAndStdDev(addrs []*addressInfo) (float64, float64) { + var totalFractionOfSuccessfulRequests float64 + var mean float64 + for _, addrInfo := range addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + totalFractionOfSuccessfulRequests += float64(bucket.numSuccesses) / float64(rv) + } + mean = totalFractionOfSuccessfulRequests / float64(len(addrs)) + var sumOfSquares float64 + for _, addrInfo := range addrs { + bucket := addrInfo.callCounter.inactiveBucket + rv := bucket.numSuccesses + bucket.numFailures + devFromMean := (float64(bucket.numSuccesses) / float64(rv)) - mean + sumOfSquares += devFromMean * devFromMean + } + variance := sumOfSquares / float64(len(addrs)) + return mean, math.Sqrt(variance) +} + +// successRateAlgorithm ejects any addresses where the success rate falls below +// the other addresses according to mean and standard deviation, and if overall +// applicable from other set heuristics. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) successRateAlgorithm() { + addrsToConsider := b.addrsWithAtLeastRequestVolume(b.cfg.SuccessRateEjection.RequestVolume) + if len(addrsToConsider) < int(b.cfg.SuccessRateEjection.MinimumHosts) { + return + } + mean, stddev := b.meanAndStdDev(addrsToConsider) + for _, addrInfo := range addrsToConsider { + bucket := addrInfo.callCounter.inactiveBucket + ejectionCfg := b.cfg.SuccessRateEjection + if float64(b.numAddrsEjected)/float64(len(b.addrs))*100 >= float64(b.cfg.MaxEjectionPercent) { + return + } + successRate := float64(bucket.numSuccesses) / float64(bucket.numSuccesses+bucket.numFailures) + requiredSuccessRate := mean - stddev*(float64(ejectionCfg.StdevFactor)/1000) + if successRate < requiredSuccessRate { + channelz.Infof(logger, b.channelzParentID, "SuccessRate algorithm detected outlier: %s. 
Parameters: successRate=%f, mean=%f, stddev=%f, requiredSuccessRate=%f", addrInfo, successRate, mean, stddev, requiredSuccessRate) + if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + b.ejectAddress(addrInfo) + } + } + } +} + +// failurePercentageAlgorithm ejects any addresses where the failure percentage +// rate exceeds a set enforcement percentage, if overall applicable from other +// set heuristics. +// +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) failurePercentageAlgorithm() { + addrsToConsider := b.addrsWithAtLeastRequestVolume(b.cfg.FailurePercentageEjection.RequestVolume) + if len(addrsToConsider) < int(b.cfg.FailurePercentageEjection.MinimumHosts) { + return + } + + for _, addrInfo := range addrsToConsider { + bucket := addrInfo.callCounter.inactiveBucket + ejectionCfg := b.cfg.FailurePercentageEjection + if float64(b.numAddrsEjected)/float64(len(b.addrs))*100 >= float64(b.cfg.MaxEjectionPercent) { + return + } + failurePercentage := (float64(bucket.numFailures) / float64(bucket.numSuccesses+bucket.numFailures)) * 100 + if failurePercentage > float64(b.cfg.FailurePercentageEjection.Threshold) { + channelz.Infof(logger, b.channelzParentID, "FailurePercentage algorithm detected outlier: %s, failurePercentage=%f", addrInfo, failurePercentage) + if uint32(grpcrand.Int31n(100)) < ejectionCfg.EnforcementPercentage { + b.ejectAddress(addrInfo) + } + } + } +} + +// Caller must hold b.mu. +func (b *outlierDetectionBalancer) ejectAddress(addrInfo *addressInfo) { + b.numAddrsEjected++ + addrInfo.latestEjectionTimestamp = b.timerStartTime + addrInfo.ejectionTimeMultiplier++ + for _, sbw := range addrInfo.sws { + sbw.eject() + channelz.Infof(logger, b.channelzParentID, "Subchannel ejected: %s", sbw) + } + +} + +// Caller must hold b.mu. 
+func (b *outlierDetectionBalancer) unejectAddress(addrInfo *addressInfo) { + b.numAddrsEjected-- + addrInfo.latestEjectionTimestamp = time.Time{} + for _, sbw := range addrInfo.sws { + sbw.uneject() + channelz.Infof(logger, b.channelzParentID, "Subchannel unejected: %s", sbw) + } +} + +// addressInfo contains the runtime information about an address that pertains +// to Outlier Detection. This struct and all of its fields is protected by +// outlierDetectionBalancer.mu in the case where it is accessed through the +// address map. In the case of Picker callbacks, the writes to the activeBucket +// of callCounter are protected by atomically loading and storing +// unsafe.Pointers (see further explanation in incrementCounter()). +type addressInfo struct { + // The call result counter object. + callCounter *callCounter + + // The latest ejection timestamp, or zero if the address is currently not + // ejected. + latestEjectionTimestamp time.Time + + // The current ejection time multiplier, starting at 0. + ejectionTimeMultiplier int64 + + // A list of subchannel wrapper objects that correspond to this address. + sws []*subConnWrapper +} + +func (a *addressInfo) String() string { + var res strings.Builder + res.WriteString("[") + for _, sw := range a.sws { + res.WriteString(sw.String()) + } + res.WriteString("]") + return res.String() +} + +func newAddressInfo() *addressInfo { + return &addressInfo{ + callCounter: newCallCounter(), + } +} diff --git a/xds/internal/balancer/outlierdetection/balancer_test.go b/xds/internal/balancer/outlierdetection/balancer_test.go new file mode 100644 index 000000000000..3d1efe8dcd56 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/balancer_test.go @@ -0,0 +1,1575 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package outlierdetection + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpctest" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/clusterimpl" +) + +var ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestParseConfig verifies the ParseConfig() method in the Outlier Detection +// Balancer. 
+func (s) TestParseConfig(t *testing.T) { + const errParseConfigName = "errParseConfigBalancer" + stub.Register(errParseConfigName, stub.BalancerFuncs{ + ParseConfig: func(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return nil, errors.New("some error") + }, + }) + + parser := bb{} + const ( + defaultInterval = iserviceconfig.Duration(10 * time.Second) + defaultBaseEjectionTime = iserviceconfig.Duration(30 * time.Second) + defaultMaxEjectionTime = iserviceconfig.Duration(300 * time.Second) + defaultMaxEjectionPercent = 10 + defaultSuccessRateStdevFactor = 1900 + defaultEnforcingSuccessRate = 100 + defaultSuccessRateMinimumHosts = 5 + defaultSuccessRateRequestVolume = 100 + defaultFailurePercentageThreshold = 85 + defaultEnforcingFailurePercentage = 0 + defaultFailurePercentageMinimumHosts = 5 + defaultFailurePercentageRequestVolume = 50 + ) + tests := []struct { + name string + input string + wantCfg serviceconfig.LoadBalancingConfig + wantErr string + }{ + { + name: "no-fields-set-should-get-default", + input: `{ + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + + { + name: "some-top-level-fields-set", + input: `{ + "interval": "15s", + "maxEjectionTime": "350s", + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + // Should get set fields + defaults for unset fields. 
+ wantCfg: &LBConfig{ + Interval: iserviceconfig.Duration(15 * time.Second), + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: iserviceconfig.Duration(350 * time.Second), + MaxEjectionPercent: defaultMaxEjectionPercent, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "success-rate-ejection-present-but-no-fields", + input: `{ + "successRateEjection": {}, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + // Should get defaults of success-rate-ejection struct. + wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: defaultSuccessRateStdevFactor, + EnforcementPercentage: defaultEnforcingSuccessRate, + MinimumHosts: defaultSuccessRateMinimumHosts, + RequestVolume: defaultSuccessRateRequestVolume, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "success-rate-ejection-present-partially-set", + input: `{ + "successRateEjection": { + "stdevFactor": 1000, + "minimumHosts": 5 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + // Should get set fields + defaults for others in success rate + // ejection layer. 
+ wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 1000, + EnforcementPercentage: defaultEnforcingSuccessRate, + MinimumHosts: 5, + RequestVolume: defaultSuccessRateRequestVolume, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "success-rate-ejection-present-fully-set", + input: `{ + "successRateEjection": { + "stdevFactor": 1000, + "enforcementPercentage": 50, + "minimumHosts": 5, + "requestVolume": 50 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 1000, + EnforcementPercentage: 50, + MinimumHosts: 5, + RequestVolume: 50, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "failure-percentage-ejection-present-but-no-fields", + input: `{ + "failurePercentageEjection": {}, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + // Should get defaults of failure percentage ejection layer. 
+ wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: defaultFailurePercentageThreshold, + EnforcementPercentage: defaultEnforcingFailurePercentage, + MinimumHosts: defaultFailurePercentageMinimumHosts, + RequestVolume: defaultFailurePercentageRequestVolume, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "failure-percentage-ejection-present-partially-set", + input: `{ + "failurePercentageEjection": { + "threshold": 80, + "minimumHosts": 10 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + // Should get set fields + defaults for others in success rate + // ejection layer. + wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 80, + EnforcementPercentage: defaultEnforcingFailurePercentage, + MinimumHosts: 10, + RequestVolume: defaultFailurePercentageRequestVolume, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "failure-percentage-ejection-present-fully-set", + input: `{ + "failurePercentageEjection": { + "threshold": 80, + "enforcementPercentage": 100, + "minimumHosts": 10, + "requestVolume": 40 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + Interval: defaultInterval, + BaseEjectionTime: defaultBaseEjectionTime, + MaxEjectionTime: 
defaultMaxEjectionTime, + MaxEjectionPercent: defaultMaxEjectionPercent, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 80, + EnforcementPercentage: 100, + MinimumHosts: 10, + RequestVolume: 40, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { // to make sure zero values aren't overwritten by defaults + name: "lb-config-every-field-set-zero-value", + input: `{ + "interval": "0s", + "baseEjectionTime": "0s", + "maxEjectionTime": "0s", + "maxEjectionPercent": 0, + "successRateEjection": { + "stdevFactor": 0, + "enforcementPercentage": 0, + "minimumHosts": 0, + "requestVolume": 0 + }, + "failurePercentageEjection": { + "threshold": 0, + "enforcementPercentage": 0, + "minimumHosts": 0, + "requestVolume": 0 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + SuccessRateEjection: &SuccessRateEjection{}, + FailurePercentageEjection: &FailurePercentageEjection{}, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "lb-config-every-field-set", + input: `{ + "interval": "10s", + "baseEjectionTime": "30s", + "maxEjectionTime": "300s", + "maxEjectionPercent": 10, + "successRateEjection": { + "stdevFactor": 1900, + "enforcementPercentage": 100, + "minimumHosts": 5, + "requestVolume": 100 + }, + "failurePercentageEjection": { + "threshold": 85, + "enforcementPercentage": 5, + "minimumHosts": 5, + "requestVolume": 50 + }, + "childPolicy": [ + { + "xds_cluster_impl_experimental": { + "cluster": "test_cluster" + } + } + ] + }`, + wantCfg: &LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), 
+ MaxEjectionPercent: 10, + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 1900, + EnforcementPercentage: 100, + MinimumHosts: 5, + RequestVolume: 100, + }, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 85, + EnforcementPercentage: 5, + MinimumHosts: 5, + RequestVolume: 50, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "xds_cluster_impl_experimental", + Config: &clusterimpl.LBConfig{ + Cluster: "test_cluster", + }, + }, + }, + }, + { + name: "interval-is-negative", + input: `{"interval": "-10s"}`, + wantErr: "OutlierDetectionLoadBalancingConfig.interval = -10s; must be >= 0", + }, + { + name: "base-ejection-time-is-negative", + input: `{"baseEjectionTime": "-10s"}`, + wantErr: "OutlierDetectionLoadBalancingConfig.base_ejection_time = -10s; must be >= 0", + }, + { + name: "max-ejection-time-is-negative", + input: `{"maxEjectionTime": "-10s"}`, + wantErr: "OutlierDetectionLoadBalancingConfig.max_ejection_time = -10s; must be >= 0", + }, + { + name: "max-ejection-percent-is-greater-than-100", + input: `{"maxEjectionPercent": 150}`, + wantErr: "OutlierDetectionLoadBalancingConfig.max_ejection_percent = 150; must be <= 100", + }, + { + name: "enforcement-percentage-success-rate-is-greater-than-100", + input: `{ + "successRateEjection": { + "enforcementPercentage": 150 + } + }`, + wantErr: "OutlierDetectionLoadBalancingConfig.SuccessRateEjection.enforcement_percentage = 150; must be <= 100", + }, + { + name: "failure-percentage-threshold-is-greater-than-100", + input: `{ + "failurePercentageEjection": { + "threshold": 150 + } + }`, + wantErr: "OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.threshold = 150; must be <= 100", + }, + { + name: "enforcement-percentage-failure-percentage-ejection-is-greater-than-100", + input: `{ + "failurePercentageEjection": { + "enforcementPercentage": 150 + } + }`, + wantErr: "OutlierDetectionLoadBalancingConfig.FailurePercentageEjection.enforcement_percentage = 150; must be 
<= 100", + }, + { + name: "child-policy-present-but-parse-error", + input: `{ + "childPolicy": [ + { + "errParseConfigBalancer": { + "cluster": "test_cluster" + } + } + ] + }`, + wantErr: "error parsing loadBalancingConfig for policy \"errParseConfigBalancer\"", + }, + { + name: "no-supported-child-policy", + input: `{ + "childPolicy": [ + { + "doesNotExistBalancer": { + "cluster": "test_cluster" + } + } + ] + }`, + wantErr: "invalid loadBalancingConfig: no supported policies found", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + gotCfg, gotErr := parser.ParseConfig(json.RawMessage(test.input)) + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("ParseConfig(%v) = %v, wantErr %v", test.input, gotErr, test.wantErr) + } + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("ParseConfig(%v) = %v, wantErr %v", test.input, gotErr, test.wantErr) + } + if test.wantErr != "" { + return + } + if diff := cmp.Diff(gotCfg, test.wantCfg); diff != "" { + t.Fatalf("parseConfig(%v) got unexpected output, diff (-got +want): %v", string(test.input), diff) + } + }) + } +} + +func (lbc *LBConfig) Equal(lbc2 *LBConfig) bool { + if !lbc.EqualIgnoringChildPolicy(lbc2) { + return false + } + return cmp.Equal(lbc.ChildPolicy, lbc2.ChildPolicy) +} + +type subConnWithState struct { + sc balancer.SubConn + state balancer.SubConnState +} + +func setup(t *testing.T) (*outlierDetectionBalancer, *testutils.TestClientConn, func()) { + t.Helper() + builder := balancer.Get(Name) + if builder == nil { + t.Fatalf("balancer.Get(%q) returned nil", Name) + } + tcc := testutils.NewTestClientConn(t) + odB := builder.Build(tcc, balancer.BuildOptions{ChannelzParentID: channelz.NewIdentifierForTesting(channelz.RefChannel, time.Now().Unix(), nil)}) + return odB.(*outlierDetectionBalancer), tcc, odB.Close +} + +type emptyChildConfig struct { + serviceconfig.LoadBalancingConfig +} + +// TestChildBasicOperations tests basic operations of the 
Outlier Detection +// Balancer and its interaction with its child. The following scenarios are +// tested, in a step by step fashion: +// 1. The Outlier Detection Balancer receives its first good configuration. The +// balancer is expected to create a child and send the child its configuration. +// 2. The Outlier Detection Balancer receives new configuration that specifies a +// child's type, and the new type immediately reports READY inline. The first +// child balancer should be closed and the second child balancer should receive +// a config update. +// 3. The Outlier Detection Balancer is closed. The second child balancer should +// be closed. +func (s) TestChildBasicOperations(t *testing.T) { + bc := emptyChildConfig{} + + ccsCh := testutils.NewChannel() + closeCh := testutils.NewChannel() + + stub.Register(t.Name()+"child1", stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + ccsCh.Send(ccs.BalancerConfig) + return nil + }, + Close: func(bd *stub.BalancerData) { + closeCh.Send(nil) + }, + }) + + stub.Register(t.Name()+"child2", stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + // UpdateState inline to READY to complete graceful switch process + // synchronously from any UpdateClientConnState call. + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &testutils.TestConstPicker{}, + }) + ccsCh.Send(nil) + return nil + }, + Close: func(bd *stub.BalancerData) { + closeCh.Send(nil) + }, + }) + + od, tcc, _ := setup(t) + + // This first config update should cause a child to be built and forwarded + // its first update. 
+ od.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: t.Name() + "child1", + Config: bc, + }, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cr, err := ccsCh.Receive(ctx) + if err != nil { + t.Fatalf("timed out waiting for UpdateClientConnState on the first child balancer: %v", err) + } + if _, ok := cr.(emptyChildConfig); !ok { + t.Fatalf("Received child policy config of type %T, want %T", cr, emptyChildConfig{}) + } + + // This Update Client Conn State call should cause the first child balancer + // to close, and a new child to be created and also forwarded it's first + // config update. + od.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: t.Name() + "child2", + Config: emptyChildConfig{}, + }, + }, + }) + + // Verify inline UpdateState() call from the new child eventually makes it's + // way to the Test Client Conn. + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case state := <-tcc.NewStateCh: + if state != connectivity.Ready { + t.Fatalf("ClientConn received connectivity state %v, want %v", state, connectivity.Ready) + } + } + + // Verify the first child balancer closed. + if _, err = closeCh.Receive(ctx); err != nil { + t.Fatalf("timed out waiting for the first child balancer to be closed: %v", err) + } + // Verify the second child balancer received it's first config update. + if _, err = ccsCh.Receive(ctx); err != nil { + t.Fatalf("timed out waiting for UpdateClientConnState on the second child balancer: %v", err) + } + // Closing the Outlier Detection Balancer should close the newly created + // child. 
+ od.Close() + if _, err = closeCh.Receive(ctx); err != nil { + t.Fatalf("timed out waiting for the second child balancer to be closed: %v", err) + } +} + +// TestUpdateAddresses tests the functionality of UpdateAddresses and any +// changes in the addresses/plurality of those addresses for a SubConn. The +// Balancer is set up with two upstreams, with one of the upstreams being +// ejected. Initially, there is one SubConn for each address. The following +// scenarios are tested, in a step by step fashion: +// 1. The SubConn not currently ejected switches addresses to the address that +// is ejected. This should cause the SubConn to get ejected. +// 2. Update this same SubConn to multiple addresses. This should cause the +// SubConn to get unejected, as it is no longer being tracked by Outlier +// Detection at that point. +// 3. Update this same SubConn to different addresses, still multiple. This +// should be a noop, as the SubConn is still no longer being tracked by Outlier +// Detection. +// 4. Update this same SubConn to a single address which is ejected. This +// should cause the SubConn to be ejected. 
+func (s) TestUpdateAddresses(t *testing.T) { + scsCh := testutils.NewChannel() + var scw1, scw2 balancer.SubConn + var err error + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + scw1, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + scw2, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address2"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &rrPicker{ + scs: []balancer.SubConn{scw1, scw2}, + }, + }) + return nil + }, + UpdateSubConnState: func(_ *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + scsCh.Send(subConnWithState{ + sc: sc, + state: state, + }) + }}) + + od, tcc, cleanup := setup(t) + defer cleanup() + + od.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + }, + }, + BalancerConfig: &LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 50, + EnforcementPercentage: 100, + MinimumHosts: 2, + RequestVolume: 3, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Setup the system to where one address is ejected and one address + // isn't. 
+ select { + case <-ctx.Done(): + t.Fatal("timeout while waiting for a UpdateState call on the ClientConn") + case picker := <-tcc.NewPickerCh: + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + // Simulate 5 successful RPC calls on the first SubConn (the first call + // to picker.Pick). + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{}) + } + pi, err = picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + // Simulate 5 failed RPC calls on the second SubConn (the second call to + // picker.Pick). Thus, when the interval timer algorithm is run, the + // second SubConn's address should be ejected, which will allow us to + // further test UpdateAddresses() logic. + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{Err: errors.New("some error")}) + } + od.intervalTimerAlgorithm() + // verify UpdateSubConnState() got called with TRANSIENT_FAILURE for + // child with address that was ejected. + gotSCWS, err := scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw2, + state: balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + } + + // Update scw1 to another address that is currently ejected. This should + // cause scw1 to get ejected. + od.UpdateAddresses(scw1, []resolver.Address{{Addr: "address2"}}) + + // Verify that update addresses gets forwarded to ClientConn. + select { + case <-ctx.Done(): + t.Fatal("timeout while waiting for a UpdateState call on the ClientConn") + case <-tcc.UpdateAddressesAddrsCh: + } + // Verify scw1 got ejected (UpdateSubConnState called with TRANSIENT + // FAILURE). 
+ gotSCWS, err := scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw1, + state: balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + + // Update scw1 to multiple addresses. This should cause scw1 to get + // unejected, as it is no longer being tracked for Outlier Detection. + od.UpdateAddresses(scw1, []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + }) + // Verify scw1 got unejected (UpdateSubConnState called with recent state). + gotSCWS, err = scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw1, + state: balancer.SubConnState{ConnectivityState: connectivity.Idle}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + + // Update scw1 to a different multiple addresses list. A change of addresses + // in which the plurality goes from multiple to multiple should be a no-op, + // as the address continues to be ignored by outlier detection. + od.UpdateAddresses(scw1, []resolver.Address{ + {Addr: "address2"}, + {Addr: "address3"}, + }) + // Verify no downstream effects. + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := scsCh.Receive(sCtx); err == nil { + t.Fatalf("no SubConn update should have been sent (no SubConn got ejected/unejected)") + } + + // Update scw1 back to a single address, which is ejected. This should cause + // the SubConn to be re-ejected. + od.UpdateAddresses(scw1, []resolver.Address{{Addr: "address2"}}) + // Verify scw1 got ejected (UpdateSubConnState called with TRANSIENT FAILURE). 
+ gotSCWS, err = scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw1, + state: balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } +} + +func scwsEqual(gotSCWS subConnWithState, wantSCWS subConnWithState) error { + if !cmp.Equal(gotSCWS, wantSCWS, cmp.AllowUnexported(subConnWithState{}, testutils.TestSubConn{}, subConnWrapper{}, addressInfo{}), cmpopts.IgnoreFields(subConnWrapper{}, "scUpdateCh")) { + return fmt.Errorf("received SubConnState: %+v, want %+v", gotSCWS, wantSCWS) + } + return nil +} + +type rrPicker struct { + scs []balancer.SubConn + next int +} + +func (rrp *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + sc := rrp.scs[rrp.next] + rrp.next = (rrp.next + 1) % len(rrp.scs) + return balancer.PickResult{SubConn: sc}, nil +} + +// TestDurationOfInterval tests the configured interval timer. +// The following scenarios are tested: +// 1. The Outlier Detection Balancer receives it's first config. The balancer +// should configure the timer with whatever is directly specified on the config. +// 2. The Outlier Detection Balancer receives a subsequent config. The balancer +// should configure with whatever interval is configured minus the difference +// between the current time and the previous start timestamp. +// 3. The Outlier Detection Balancer receives a no-op configuration. The +// balancer should not configure a timer at all. 
+func (s) TestDurationOfInterval(t *testing.T) { + stub.Register(t.Name(), stub.BalancerFuncs{}) + + od, _, cleanup := setup(t) + defer func(af func(d time.Duration, f func()) *time.Timer) { + cleanup() + afterFunc = af + }(afterFunc) + + durationChan := testutils.NewChannel() + afterFunc = func(dur time.Duration, _ func()) *time.Timer { + durationChan.Send(dur) + return time.NewTimer(math.MaxInt64) + } + + od.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + Interval: iserviceconfig.Duration(8 * time.Second), + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 1900, + EnforcementPercentage: 100, + MinimumHosts: 5, + RequestVolume: 100, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + d, err := durationChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving duration from afterFunc() call: %v", err) + } + dur := d.(time.Duration) + // The configured duration should be 8 seconds - what the balancer was + // configured with. + if dur != 8*time.Second { + t.Fatalf("configured duration should have been 8 seconds to start timer") + } + + // Override time.Now to time.Now() + 5 seconds. This will represent 5 + // seconds already passing for the next check in UpdateClientConnState. + defer func(n func() time.Time) { + now = n + }(now) + now = func() time.Time { + return time.Now().Add(time.Second * 5) + } + + // UpdateClientConnState with an interval of 9 seconds. Due to 5 seconds + // already passing (from overridden time.Now function), this should start an + // interval timer of ~4 seconds. 
+ od.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + Interval: iserviceconfig.Duration(9 * time.Second), + SuccessRateEjection: &SuccessRateEjection{ + StdevFactor: 1900, + EnforcementPercentage: 100, + MinimumHosts: 5, + RequestVolume: 100, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + d, err = durationChan.Receive(ctx) + if err != nil { + t.Fatalf("Error receiving duration from afterFunc() call: %v", err) + } + dur = d.(time.Duration) + if dur.Seconds() < 3.5 || 4.5 < dur.Seconds() { + t.Fatalf("configured duration should have been around 4 seconds to start timer") + } + + // UpdateClientConnState with a no-op config. This shouldn't configure the + // interval timer at all due to it being a no-op. + od.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + Interval: iserviceconfig.Duration(10 * time.Second), + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + // No timer should have been started. + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err = durationChan.Receive(sCtx); err == nil { + t.Fatal("No timer should have started.") + } +} + +// TestEjectUnejectSuccessRate tests the functionality of the interval timer +// algorithm when configured with SuccessRateEjection. The Outlier Detection +// Balancer will be set up with 3 SubConns, each with a different address. +// It tests the following scenarios, in a step by step fashion: +// 1. The three addresses each have 5 successes. The interval timer algorithm should +// not eject any of the addresses. +// 2. Two of the addresses have 5 successes, the third has five failures. The +// interval timer algorithm should eject the third address with five failures. +// 3. The interval timer algorithm is run at a later time past max ejection +// time. 
The interval timer algorithm should uneject the third address. +func (s) TestEjectUnejectSuccessRate(t *testing.T) { + scsCh := testutils.NewChannel() + var scw1, scw2, scw3 balancer.SubConn + var err error + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + scw1, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + scw2, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address2"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + scw3, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address3"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &rrPicker{ + scs: []balancer.SubConn{scw1, scw2, scw3}, + }, + }) + return nil + }, + UpdateSubConnState: func(_ *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + scsCh.Send(subConnWithState{ + sc: sc, + state: state, + }) + }, + }) + + od, tcc, cleanup := setup(t) + defer func() { + cleanup() + }() + + od.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + {Addr: "address3"}, + }, + }, + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, // so the interval will never run unless called manually in test. 
+ BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 50, + EnforcementPercentage: 100, + MinimumHosts: 3, + RequestVolume: 3, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case picker := <-tcc.NewPickerCh: + // Set each of the three upstream addresses to have five successes each. + // This should cause none of the addresses to be ejected as none of them + // are outliers according to the success rate algorithm. + for i := 0; i < 3; i++ { + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{}) + } + } + + od.intervalTimerAlgorithm() + + // verify no UpdateSubConnState() call on the child, as no addresses got + // ejected (ejected address will cause an UpdateSubConnState call). + sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := scsCh.Receive(sCtx); err == nil { + t.Fatalf("no SubConn update should have been sent (no SubConn got ejected)") + } + + // Since no addresses are ejected, a SubConn update should forward down + // to the child. 
+ od.UpdateSubConnState(scw1.(*subConnWrapper).SubConn, balancer.SubConnState{ + ConnectivityState: connectivity.Connecting, + }) + + gotSCWS, err := scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw1, + state: balancer.SubConnState{ConnectivityState: connectivity.Connecting}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + + // Set two of the upstream addresses to have five successes each, and + // one of the upstream addresses to have five failures. This should + // cause the address which has five failures to be ejected according to + // the SuccessRateAlgorithm. + for i := 0; i < 2; i++ { + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{}) + } + } + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{Err: errors.New("some error")}) + } + + // should eject address that always errored. + od.intervalTimerAlgorithm() + // Due to the address being ejected, the SubConn with that address + // should be ejected, meaning a TRANSIENT_FAILURE connectivity state + // gets reported to the child. + gotSCWS, err = scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw3, + state: balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + // Only one address should be ejected. 
+ sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := scsCh.Receive(sCtx); err == nil { + t.Fatalf("Only one SubConn update should have been sent (only one SubConn got ejected)") + } + + // Now that an address is ejected, SubConn updates for SubConns using + // that address should not be forwarded downward. These SubConn updates + // will be cached to update the child sometime in the future when the + // address gets unejected. + od.UpdateSubConnState(pi.SubConn, balancer.SubConnState{ + ConnectivityState: connectivity.Connecting, + }) + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := scsCh.Receive(sCtx); err == nil { + t.Fatalf("SubConn update should not have been forwarded (the SubConn is ejected)") + } + + // Override now to cause the interval timer algorithm to always uneject + // the ejected address. This will always uneject the ejected address + // because this time is set way past the max ejection time set in the + // configuration, which will make the next interval timer algorithm run + // uneject any ejected addresses. + defer func(n func() time.Time) { + now = n + }(now) + now = func() time.Time { + return time.Now().Add(time.Second * 1000) + } + od.intervalTimerAlgorithm() + + // unejected SubConn should report latest persisted state - which is + // connecting from earlier. + gotSCWS, err = scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw3, + state: balancer.SubConnState{ConnectivityState: connectivity.Connecting}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + } +} + +// TestEjectFailureRate tests the functionality of the interval timer algorithm +// when configured with FailurePercentageEjection, and also the functionality of +// noop configuration. 
The Outlier Detection Balancer will be set up with 3 +// SubConns, each with a different address. It tests the following scenarios, in +// a step by step fashion: +// 1. The three addresses each have 5 successes. The interval timer algorithm +// should not eject any of the addresses. +// 2. Two of the addresses have 5 successes, the third has five failures. The +// interval timer algorithm should eject the third address with five failures. +// 3. The Outlier Detection Balancer receives a subsequent noop config update. +// The balancer should uneject all ejected addresses. +func (s) TestEjectFailureRate(t *testing.T) { + scsCh := testutils.NewChannel() + var scw1, scw2, scw3 balancer.SubConn + var err error + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, _ balancer.ClientConnState) error { + if scw1 != nil { // UpdateClientConnState was already called, no need to recreate SubConns. + return nil + } + scw1, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + scw2, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address2"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + scw3, err = bd.ClientConn.NewSubConn([]resolver.Address{{Addr: "address3"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Errorf("error in od.NewSubConn call: %v", err) + } + return nil + }, + UpdateSubConnState: func(_ *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + scsCh.Send(subConnWithState{ + sc: sc, + state: state, + }) + }, + }) + + od, tcc, cleanup := setup(t) + defer func() { + cleanup() + }() + + od.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + {Addr: "address3"}, + }, + }, + BalancerConfig: &LBConfig{ + 
Interval: math.MaxInt64, // so the interval will never run unless called manually in test.
+ BaseEjectionTime: iserviceconfig.Duration(30 * time.Second),
+ MaxEjectionTime: iserviceconfig.Duration(300 * time.Second),
+ MaxEjectionPercent: 10,
+ SuccessRateEjection: &SuccessRateEjection{
+ StdevFactor: 500,
+ EnforcementPercentage: 100,
+ MinimumHosts: 3,
+ RequestVolume: 3,
+ },
+ ChildPolicy: &iserviceconfig.BalancerConfig{
+ Name: t.Name(),
+ Config: emptyChildConfig{},
+ },
+ },
+ })
+
+ od.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &rrPicker{
+ scs: []balancer.SubConn{scw1, scw2, scw3},
+ },
+ })
+
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+ defer cancel()
+
+ select {
+ case <-ctx.Done():
+ t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn")
+ case picker := <-tcc.NewPickerCh:
+ // Set each upstream address to have five successes each. This should
+ // cause none of the addresses to be ejected as none of them have a
+ // failure percentage at or above the ejection threshold.
+ for i := 0; i < 3; i++ {
+ pi, err := picker.Pick(balancer.PickInfo{})
+ if err != nil {
+ t.Fatalf("picker.Pick failed with error: %v", err)
+ }
+ for c := 0; c < 5; c++ {
+ pi.Done(balancer.DoneInfo{})
+ }
+ }
+
+ od.intervalTimerAlgorithm()
+ sCtx, cancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
+ defer cancel()
+ if _, err := scsCh.Receive(sCtx); err == nil {
+ t.Fatalf("no SubConn update should have been sent (no SubConn got ejected)")
+ }
+
+ // Set two upstream addresses to have five successes each, and one
+ // upstream address to have five failures. This should cause the address
+ // with five failures to be ejected according to the Failure Percentage
+ // Algorithm.
+ for i := 0; i < 2; i++ { + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{}) + } + } + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + t.Fatalf("picker.Pick failed with error: %v", err) + } + for c := 0; c < 5; c++ { + pi.Done(balancer.DoneInfo{Err: errors.New("some error")}) + } + + // should eject address that always errored. + od.intervalTimerAlgorithm() + + // verify UpdateSubConnState() got called with TRANSIENT_FAILURE for + // child in address that was ejected. + gotSCWS, err := scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw3, + state: balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + + // verify only one address got ejected. + sCtx, cancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer cancel() + if _, err := scsCh.Receive(sCtx); err == nil { + t.Fatalf("Only one SubConn update should have been sent (only one SubConn got ejected)") + } + + // upon the Outlier Detection balancer being reconfigured with a noop + // configuration, every ejected SubConn should be unejected. 
+ od.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + {Addr: "address3"}, + }, + }, + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, + BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + gotSCWS, err = scsCh.Receive(ctx) + if err != nil { + t.Fatalf("Error waiting for Sub Conn update: %v", err) + } + if err = scwsEqual(gotSCWS.(subConnWithState), subConnWithState{ + sc: scw3, + state: balancer.SubConnState{ConnectivityState: connectivity.Idle}, + }); err != nil { + t.Fatalf("Error in Sub Conn update: %v", err) + } + } +} + +// TestConcurrentOperations calls different operations on the balancer in +// separate goroutines to test for any race conditions and deadlocks. It also +// uses a child balancer which verifies that no operations on the child get +// called after the child balancer is closed. 
+func (s) TestConcurrentOperations(t *testing.T) { + closed := grpcsync.NewEvent() + stub.Register(t.Name(), stub.BalancerFuncs{ + UpdateClientConnState: func(*stub.BalancerData, balancer.ClientConnState) error { + if closed.HasFired() { + t.Error("UpdateClientConnState was called after Close(), which breaks the balancer API") + } + return nil + }, + ResolverError: func(*stub.BalancerData, error) { + if closed.HasFired() { + t.Error("ResolverError was called after Close(), which breaks the balancer API") + } + }, + UpdateSubConnState: func(*stub.BalancerData, balancer.SubConn, balancer.SubConnState) { + if closed.HasFired() { + t.Error("UpdateSubConnState was called after Close(), which breaks the balancer API") + } + }, + Close: func(*stub.BalancerData) { + closed.Fire() + }, + ExitIdle: func(*stub.BalancerData) { + if closed.HasFired() { + t.Error("ExitIdle was called after Close(), which breaks the balancer API") + } + }, + }) + + od, tcc, cleanup := setup(t) + defer func() { + cleanup() + }() + + od.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + {Addr: "address1"}, + {Addr: "address2"}, + {Addr: "address3"}, + }, + }, + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, // so the interval will never run unless called manually in test. 
+ BaseEjectionTime: iserviceconfig.Duration(30 * time.Second), + MaxEjectionTime: iserviceconfig.Duration(300 * time.Second), + MaxEjectionPercent: 10, + SuccessRateEjection: &SuccessRateEjection{ // Have both Success Rate and Failure Percentage to step through all the interval timer code + StdevFactor: 500, + EnforcementPercentage: 100, + MinimumHosts: 3, + RequestVolume: 3, + }, + FailurePercentageEjection: &FailurePercentageEjection{ + Threshold: 50, + EnforcementPercentage: 100, + MinimumHosts: 3, + RequestVolume: 3, + }, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + scw1, err := od.NewSubConn([]resolver.Address{{Addr: "address1"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error in od.NewSubConn call: %v", err) + } + if err != nil { + t.Fatalf("error in od.NewSubConn call: %v", err) + } + + scw2, err := od.NewSubConn([]resolver.Address{{Addr: "address2"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error in od.NewSubConn call: %v", err) + } + + scw3, err := od.NewSubConn([]resolver.Address{{Addr: "address3"}}, balancer.NewSubConnOptions{}) + if err != nil { + t.Fatalf("error in od.NewSubConn call: %v", err) + } + + od.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &rrPicker{ + scs: []balancer.SubConn{scw2, scw3}, + }, + }) + + var picker balancer.Picker + select { + case <-ctx.Done(): + t.Fatalf("timeout while waiting for a UpdateState call on the ClientConn") + case picker = <-tcc.NewPickerCh: + } + + finished := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-finished: + return + default: + } + pi, err := picker.Pick(balancer.PickInfo{}) + if err != nil { + continue + } + pi.Done(balancer.DoneInfo{}) + pi.Done(balancer.DoneInfo{Err: errors.New("some error")}) + 
time.Sleep(1 * time.Nanosecond) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-finished: + return + default: + } + od.intervalTimerAlgorithm() + } + }() + + // call Outlier Detection's balancer.ClientConn operations asynchronously. + // balancer.ClientConn operations have no guarantee from the API to be + // called synchronously. + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-finished: + return + default: + } + od.UpdateState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &rrPicker{ + scs: []balancer.SubConn{scw2, scw3}, + }, + }) + time.Sleep(1 * time.Nanosecond) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + od.NewSubConn([]resolver.Address{{Addr: "address4"}}, balancer.NewSubConnOptions{}) + }() + + wg.Add(1) + go func() { + defer wg.Done() + od.RemoveSubConn(scw1) + }() + + wg.Add(1) + go func() { + defer wg.Done() + od.UpdateAddresses(scw2, []resolver.Address{{Addr: "address3"}}) + }() + + // Call balancer.Balancers synchronously in this goroutine, upholding the + // balancer.Balancer API guarantee of synchronous calls. + od.UpdateClientConnState(balancer.ClientConnState{ // This will delete addresses and flip to no op + ResolverState: resolver.State{ + Addresses: []resolver.Address{{Addr: "address1"}}, + }, + BalancerConfig: &LBConfig{ + Interval: math.MaxInt64, + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: t.Name(), + Config: emptyChildConfig{}, + }, + }, + }) + + // Call balancer.Balancers synchronously in this goroutine, upholding the + // balancer.Balancer API guarantee. 
+ od.UpdateSubConnState(scw1.(*subConnWrapper).SubConn, balancer.SubConnState{ + ConnectivityState: connectivity.Connecting, + }) + od.ResolverError(errors.New("some error")) + od.ExitIdle() + od.Close() + close(finished) + wg.Wait() +} diff --git a/xds/internal/balancer/outlierdetection/callcounter.go b/xds/internal/balancer/outlierdetection/callcounter.go new file mode 100644 index 000000000000..4597f727b6e0 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/callcounter.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "sync/atomic" + "unsafe" +) + +type bucket struct { + numSuccesses uint32 + numFailures uint32 +} + +func newCallCounter() *callCounter { + return &callCounter{ + activeBucket: unsafe.Pointer(&bucket{}), + inactiveBucket: &bucket{}, + } +} + +// callCounter has two buckets, which each count successful and failing RPC's. +// The activeBucket is used to actively count any finished RPC's, and the +// inactiveBucket is populated with this activeBucket's data every interval for +// use by the Outlier Detection algorithm. +type callCounter struct { + // activeBucket updates every time a call finishes (from picker passed to + // Client Conn), so protect pointer read with atomic load of unsafe.Pointer + // so picker does not have to grab a mutex per RPC, the critical path. 
+ activeBucket unsafe.Pointer // bucket + inactiveBucket *bucket +} + +func (cc *callCounter) clear() { + atomic.StorePointer(&cc.activeBucket, unsafe.Pointer(&bucket{})) + cc.inactiveBucket = &bucket{} +} + +// "When the timer triggers, the inactive bucket is zeroed and swapped with the +// active bucket. Then the inactive bucket contains the number of successes and +// failures since the last time the timer triggered. Those numbers are used to +// evaluate the ejection criteria." - A50. +func (cc *callCounter) swap() { + ib := cc.inactiveBucket + *ib = bucket{} + ab := (*bucket)(atomic.SwapPointer(&cc.activeBucket, unsafe.Pointer(ib))) + cc.inactiveBucket = &bucket{ + numSuccesses: atomic.LoadUint32(&ab.numSuccesses), + numFailures: atomic.LoadUint32(&ab.numFailures), + } +} diff --git a/xds/internal/balancer/outlierdetection/callcounter_test.go b/xds/internal/balancer/outlierdetection/callcounter_test.go new file mode 100644 index 000000000000..8e4f5f29b5f8 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/callcounter_test.go @@ -0,0 +1,94 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package outlierdetection + +import ( + "sync/atomic" + "testing" + "unsafe" + + "github.com/google/go-cmp/cmp" +) + +func (b1 *bucket) Equal(b2 *bucket) bool { + if b1 == nil && b2 == nil { + return true + } + if (b1 != nil) != (b2 != nil) { + return false + } + if b1.numSuccesses != b2.numSuccesses { + return false + } + return b1.numFailures == b2.numFailures +} + +func (cc1 *callCounter) Equal(cc2 *callCounter) bool { + if cc1 == nil && cc2 == nil { + return true + } + if (cc1 != nil) != (cc2 != nil) { + return false + } + ab1 := (*bucket)(atomic.LoadPointer(&cc1.activeBucket)) + ab2 := (*bucket)(atomic.LoadPointer(&cc2.activeBucket)) + if !ab1.Equal(ab2) { + return false + } + return cc1.inactiveBucket.Equal(cc2.inactiveBucket) +} + +// TestClear tests that clear on the call counter clears (everything set to 0) +// the active and inactive buckets. +func (s) TestClear(t *testing.T) { + cc := newCallCounter() + ab := (*bucket)(atomic.LoadPointer(&cc.activeBucket)) + ab.numSuccesses = 1 + ab.numFailures = 2 + cc.inactiveBucket.numSuccesses = 4 + cc.inactiveBucket.numFailures = 5 + cc.clear() + // Both the active and inactive buckets should be cleared. + ccWant := newCallCounter() + if diff := cmp.Diff(cc, ccWant); diff != "" { + t.Fatalf("callCounter is different than expected, diff (-got +want): %v", diff) + } +} + +// TestSwap tests that swap() on the callCounter successfully has the desired +// end result of inactive bucket containing the previous active buckets data, +// and the active bucket being cleared. +func (s) TestSwap(t *testing.T) { + cc := newCallCounter() + ab := (*bucket)(atomic.LoadPointer(&cc.activeBucket)) + ab.numSuccesses = 1 + ab.numFailures = 2 + cc.inactiveBucket.numSuccesses = 4 + cc.inactiveBucket.numFailures = 5 + ib := cc.inactiveBucket + cc.swap() + // Inactive should pick up active's data, active should be swapped to zeroed + // inactive. 
+ ccWant := newCallCounter() + ccWant.inactiveBucket.numSuccesses = 1 + ccWant.inactiveBucket.numFailures = 2 + atomic.StorePointer(&ccWant.activeBucket, unsafe.Pointer(ib)) + if diff := cmp.Diff(cc, ccWant); diff != "" { + t.Fatalf("callCounter is different than expected, diff (-got +want): %v", diff) + } +} diff --git a/xds/internal/balancer/outlierdetection/config.go b/xds/internal/balancer/outlierdetection/config.go new file mode 100644 index 000000000000..196a562ed69d --- /dev/null +++ b/xds/internal/balancer/outlierdetection/config.go @@ -0,0 +1,240 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "encoding/json" + "time" + + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +// SuccessRateEjection is parameters for the success rate ejection algorithm. +// This algorithm monitors the request success rate for all endpoints and ejects +// individual endpoints whose success rates are statistical outliers. +type SuccessRateEjection struct { + // StddevFactor is used to determine the ejection threshold for + // success rate outlier ejection. The ejection threshold is the difference + // between the mean success rate, and the product of this factor and the + // standard deviation of the mean success rate: mean - (stdev * + // success_rate_stdev_factor). This factor is divided by a thousand to get a + // double. 
That is, if the desired factor is 1.9, the runtime value should + // be 1900. Defaults to 1900. + StdevFactor uint32 `json:"stdevFactor,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually ejected + // when an outlier status is detected through success rate statistics. This + // setting can be used to disable ejection or to ramp it up slowly. Defaults + // to 100. + EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the number of hosts in a cluster that must have enough + // request volume to detect success rate outliers. If the number of hosts is + // less than this setting, outlier detection via success rate statistics is + // not performed for any host in the cluster. Defaults to 5. + MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // include this host in success rate based outlier detection. If the volume + // is lower than this setting, outlier detection via success rate statistics + // is not performed for that host. Defaults to 100. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type successRateEjection SuccessRateEjection + +// UnmarshalJSON unmarshals JSON into SuccessRateEjection. If a +// SuccessRateEjection field is not set, that field will get its default value. +func (sre *SuccessRateEjection) UnmarshalJSON(j []byte) error { + sre.StdevFactor = 1900 + sre.EnforcementPercentage = 100 + sre.MinimumHosts = 5 + sre.RequestVolume = 100 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. 
+ return json.Unmarshal(j, (*successRateEjection)(sre)) +} + +// Equal returns whether the SuccessRateEjection is the same with the parameter. +func (sre *SuccessRateEjection) Equal(sre2 *SuccessRateEjection) bool { + if sre == nil && sre2 == nil { + return true + } + if (sre != nil) != (sre2 != nil) { + return false + } + if sre.StdevFactor != sre2.StdevFactor { + return false + } + if sre.EnforcementPercentage != sre2.EnforcementPercentage { + return false + } + if sre.MinimumHosts != sre2.MinimumHosts { + return false + } + return sre.RequestVolume == sre2.RequestVolume +} + +// FailurePercentageEjection is parameters for the failure percentage algorithm. +// This algorithm ejects individual endpoints whose failure rate is greater than +// some threshold, independently of any other endpoint. +type FailurePercentageEjection struct { + // Threshold is the failure percentage to use when determining failure + // percentage-based outlier detection. If the failure percentage of a given + // host is greater than or equal to this value, it will be ejected. Defaults + // to 85. + Threshold uint32 `json:"threshold,omitempty"` + // EnforcementPercentage is the % chance that a host will be actually + // ejected when an outlier status is detected through failure percentage + // statistics. This setting can be used to disable ejection or to ramp it up + // slowly. Defaults to 0. + EnforcementPercentage uint32 `json:"enforcementPercentage,omitempty"` + // MinimumHosts is the minimum number of hosts in a cluster in order to + // perform failure percentage-based ejection. If the total number of hosts + // in the cluster is less than this value, failure percentage-based ejection + // will not be performed. Defaults to 5. 
+ MinimumHosts uint32 `json:"minimumHosts,omitempty"` + // RequestVolume is the minimum number of total requests that must be + // collected in one interval (as defined by the interval duration above) to + // perform failure percentage-based ejection for this host. If the volume is + // lower than this setting, failure percentage-based ejection will not be + // performed for this host. Defaults to 50. + RequestVolume uint32 `json:"requestVolume,omitempty"` +} + +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type failurePercentageEjection FailurePercentageEjection + +// UnmarshalJSON unmarshals JSON into FailurePercentageEjection. If a +// FailurePercentageEjection field is not set, that field will get its default +// value. +func (fpe *FailurePercentageEjection) UnmarshalJSON(j []byte) error { + fpe.Threshold = 85 + fpe.EnforcementPercentage = 0 + fpe.MinimumHosts = 5 + fpe.RequestVolume = 50 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*failurePercentageEjection)(fpe)) +} + +// Equal returns whether the FailurePercentageEjection is the same with the +// parameter. +func (fpe *FailurePercentageEjection) Equal(fpe2 *FailurePercentageEjection) bool { + if fpe == nil && fpe2 == nil { + return true + } + if (fpe != nil) != (fpe2 != nil) { + return false + } + if fpe.Threshold != fpe2.Threshold { + return false + } + if fpe.EnforcementPercentage != fpe2.EnforcementPercentage { + return false + } + if fpe.MinimumHosts != fpe2.MinimumHosts { + return false + } + return fpe.RequestVolume == fpe2.RequestVolume +} + +// LBConfig is the config for the outlier detection balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // Interval is the time interval between ejection analysis sweeps. 
This can + // result in both new ejections as well as addresses being returned to + // service. Defaults to 10s. + Interval iserviceconfig.Duration `json:"interval,omitempty"` + // BaseEjectionTime is the base time that a host is ejected for. The real + // time is equal to the base time multiplied by the number of times the host + // has been ejected and is capped by MaxEjectionTime. Defaults to 30s. + BaseEjectionTime iserviceconfig.Duration `json:"baseEjectionTime,omitempty"` + // MaxEjectionTime is the maximum time that an address is ejected for. If + // not specified, the default value (300s) or the BaseEjectionTime value is + // applied, whichever is larger. + MaxEjectionTime iserviceconfig.Duration `json:"maxEjectionTime,omitempty"` + // MaxEjectionPercent is the maximum % of an upstream cluster that can be + // ejected due to outlier detection. Defaults to 10% but will eject at least + // one host regardless of the value. + MaxEjectionPercent uint32 `json:"maxEjectionPercent,omitempty"` + // SuccessRateEjection is the parameters for the success rate ejection + // algorithm. If set, success rate ejections will be performed. + SuccessRateEjection *SuccessRateEjection `json:"successRateEjection,omitempty"` + // FailurePercentageEjection is the parameters for the failure percentage + // algorithm. If set, failure rate ejections will be performed. + FailurePercentageEjection *FailurePercentageEjection `json:"failurePercentageEjection,omitempty"` + // ChildPolicy is the config for the child policy. + ChildPolicy *iserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// For UnmarshalJSON to work correctly and set defaults without infinite +// recursion. +type lbConfig LBConfig + +// UnmarshalJSON unmarshals JSON into LBConfig. If a top level LBConfig field +// (i.e. not next layer sre or fpe) is not set, that field will get its default +// value. 
If sre or fpe is not set, it will stay unset, otherwise it will +// unmarshal on those types populating with default values for their fields if +// needed. +func (lbc *LBConfig) UnmarshalJSON(j []byte) error { + // Default top layer values as documented in A50. + lbc.Interval = iserviceconfig.Duration(10 * time.Second) + lbc.BaseEjectionTime = iserviceconfig.Duration(30 * time.Second) + lbc.MaxEjectionTime = iserviceconfig.Duration(300 * time.Second) + lbc.MaxEjectionPercent = 10 + // Unmarshal JSON on a type with zero values for methods, including + // UnmarshalJSON. Overwrites defaults, leaves alone if not. typecast to + // avoid infinite recursion by not recalling this function and causing stack + // overflow. + return json.Unmarshal(j, (*lbConfig)(lbc)) +} + +// EqualIgnoringChildPolicy returns whether the LBConfig is the same as the +// parameter outside of the child policy, only comparing the Outlier Detection +// specific configuration. +func (lbc *LBConfig) EqualIgnoringChildPolicy(lbc2 *LBConfig) bool { + if lbc == nil && lbc2 == nil { + return true + } + if (lbc != nil) != (lbc2 != nil) { + return false + } + if lbc.Interval != lbc2.Interval { + return false + } + if lbc.BaseEjectionTime != lbc2.BaseEjectionTime { + return false + } + if lbc.MaxEjectionTime != lbc2.MaxEjectionTime { + return false + } + if lbc.MaxEjectionPercent != lbc2.MaxEjectionPercent { + return false + } + if !lbc.SuccessRateEjection.Equal(lbc2.SuccessRateEjection) { + return false + } + return lbc.FailurePercentageEjection.Equal(lbc2.FailurePercentageEjection) +} diff --git a/xds/internal/balancer/outlierdetection/config_test.go b/xds/internal/balancer/outlierdetection/config_test.go new file mode 100644 index 000000000000..ce924dca1bc6 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/config_test.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2022 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "reflect" + "testing" +) + +func TestSuccessRateEjection(t *testing.T) { + fields := map[string]bool{ + "StdevFactor": true, + "EnforcementPercentage": true, + "MinimumHosts": true, + "RequestVolume": true, + } + typ := reflect.TypeOf(SuccessRateEjection{}) + for i := 0; i < typ.NumField(); i++ { + if n := typ.Field(i).Name; !fields[n] { + t.Errorf("New field in SuccessRateEjection %q, update this test and Equal", n) + } + } +} + +func TestEqualFieldsFailurePercentageEjection(t *testing.T) { + fields := map[string]bool{ + "Threshold": true, + "EnforcementPercentage": true, + "MinimumHosts": true, + "RequestVolume": true, + } + typ := reflect.TypeOf(FailurePercentageEjection{}) + for i := 0; i < typ.NumField(); i++ { + if n := typ.Field(i).Name; !fields[n] { + t.Errorf("New field in FailurePercentageEjection %q, update this test and Equal", n) + } + } +} + +func TestEqualFieldsLBConfig(t *testing.T) { + fields := map[string]bool{ + "LoadBalancingConfig": true, + "Interval": true, + "BaseEjectionTime": true, + "MaxEjectionTime": true, + "MaxEjectionPercent": true, + "SuccessRateEjection": true, + "FailurePercentageEjection": true, + "ChildPolicy": true, + } + typ := reflect.TypeOf(LBConfig{}) + for i := 0; i < typ.NumField(); i++ { + if n := typ.Field(i).Name; !fields[n] { + t.Errorf("New field in LBConfig %q, update this test and EqualIgnoringChildPolicy", n) + } + 
} +} diff --git a/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go new file mode 100644 index 000000000000..e08ddc98ea79 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/e2e_test/outlierdetection_test.go @@ -0,0 +1,368 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package e2e_test contains e2e test cases for the Outlier Detection LB Policy. +package e2e_test + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/stubserver" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + "google.golang.org/grpc/serviceconfig" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + + _ "google.golang.org/grpc/xds/internal/balancer/outlierdetection" // To register helper functions which register/unregister Outlier Detection LB Policy. 
+) + +var defaultTestTimeout = 5 * time.Second + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// Setup spins up three test backends, each listening on a port on localhost. +// Two of the backends are configured to always reply with an empty response and +// no error and one is configured to always return an error. +func setupBackends(t *testing.T) ([]string, func()) { + t.Helper() + + backends := make([]*stubserver.StubServer, 3) + addresses := make([]string, 3) + // Construct and start 2 working backends. + for i := 0; i < 2; i++ { + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started good TestService backend at: %q", backend.Address) + backends[i] = backend + addresses[i] = backend.Address + } + + // Construct and start a failing backend. + backend := &stubserver.StubServer{ + EmptyCallF: func(ctx context.Context, in *testpb.Empty) (*testpb.Empty, error) { + return nil, errors.New("some error") + }, + } + if err := backend.StartServer(); err != nil { + t.Fatalf("Failed to start backend: %v", err) + } + t.Logf("Started bad TestService backend at: %q", backend.Address) + backends[2] = backend + addresses[2] = backend.Address + cancel := func() { + for _, backend := range backends { + backend.Stop() + } + } + return addresses, cancel +} + +// checkRoundRobinRPCs verifies that EmptyCall RPCs on the given ClientConn, +// connected to a server exposing the test.grpc_testing.TestService, are +// roundrobined across the given backend addresses. +// +// Returns a non-nil error if context deadline expires before RPCs start to get +// roundrobined across the given backends. 
+func checkRoundRobinRPCs(ctx context.Context, client testgrpc.TestServiceClient, addrs []resolver.Address) error { + wantAddrCount := make(map[string]int) + for _, addr := range addrs { + wantAddrCount[addr.Addr]++ + } + for ; ctx.Err() == nil; <-time.After(time.Millisecond) { + // Perform 3 iterations. + var iterations [][]string + for i := 0; i < 3; i++ { + iteration := make([]string, len(addrs)) + for c := 0; c < len(addrs); c++ { + var peer peer.Peer + client.EmptyCall(ctx, &testpb.Empty{}, grpc.Peer(&peer)) + if peer.Addr != nil { + iteration[c] = peer.Addr.String() + } + } + iterations = append(iterations, iteration) + } + // Ensure the first iteration contains all addresses in addrs. + gotAddrCount := make(map[string]int) + for _, addr := range iterations[0] { + gotAddrCount[addr]++ + } + if diff := cmp.Diff(gotAddrCount, wantAddrCount); diff != "" { + continue + } + // Ensure all three iterations contain the same addresses. + if !cmp.Equal(iterations[0], iterations[1]) || !cmp.Equal(iterations[0], iterations[2]) { + continue + } + return nil + } + return fmt.Errorf("timeout when waiting for roundrobin distribution of RPCs across addresses: %v", addrs) +} + +// TestOutlierDetectionAlgorithmsE2E tests the Outlier Detection Success Rate +// and Failure Percentage algorithms in an e2e fashion. The Outlier Detection +// Balancer is configured as the top level LB Policy of the channel with a Round +// Robin child, and connects to three upstreams. Two of the upstreams are healthy and +// one is unhealthy. The two algorithms should at some point eject the failing +// upstream, causing RPCs to not be routed to that upstream, and only be +// Round Robined across the two healthy upstreams. Other than the intervals the +// unhealthy upstream is ejected, RPCs should regularly round robin +// across all three upstreams.
+func (s) TestOutlierDetectionAlgorithmsE2E(t *testing.T) { + tests := []struct { + name string + odscJSON string + }{ + { + name: "Success Rate Algorithm", + odscJSON: ` +{ + "loadBalancingConfig": [ + { + "outlier_detection_experimental": { + "interval": "0.050s", + "baseEjectionTime": "0.100s", + "maxEjectionTime": "300s", + "maxEjectionPercent": 33, + "successRateEjection": { + "stdevFactor": 50, + "enforcementPercentage": 100, + "minimumHosts": 3, + "requestVolume": 5 + }, + "childPolicy": [{"round_robin": {}}] + } + } + ] +}`, + }, + { + name: "Failure Percentage Algorithm", + odscJSON: ` +{ + "loadBalancingConfig": [ + { + "outlier_detection_experimental": { + "interval": "0.050s", + "baseEjectionTime": "0.100s", + "maxEjectionTime": "300s", + "maxEjectionPercent": 33, + "failurePercentageEjection": { + "threshold": 50, + "enforcementPercentage": 100, + "minimumHosts": 3, + "requestVolume": 5 + }, + "childPolicy": [{"round_robin": {}} + ] + } + } + ] +}`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + addresses, cancel := setupBackends(t) + defer cancel() + + mr := manual.NewBuilderWithScheme("od-e2e") + defer mr.Close() + + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(test.odscJSON) + // The full list of addresses. + fullAddresses := []resolver.Address{ + {Addr: addresses[0]}, + {Addr: addresses[1]}, + {Addr: addresses[2]}, + } + mr.InitialState(resolver.State{ + Addresses: fullAddresses, + ServiceConfig: sc, + }) + + cc, err := grpc.Dial(mr.Scheme()+":///", grpc.WithResolvers(mr), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + testServiceClient := testgrpc.NewTestServiceClient(cc) + + // At first, due to no statistics on each of the backends, the 3 + // upstreams should all be round robined across. 
+ if err = checkRoundRobinRPCs(ctx, testServiceClient, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + + // The addresses which don't return errors. + okAddresses := []resolver.Address{ + {Addr: addresses[0]}, + {Addr: addresses[1]}, + } + // After calling the three upstreams, one of them constantly error + // and should eventually be ejected for a period of time. This + // period of time should cause the RPC's to be round robined only + // across the two that are healthy. + if err = checkRoundRobinRPCs(ctx, testServiceClient, okAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + + // The failing upstream isn't ejected indefinitely, and eventually + // should be unejected in subsequent iterations of the interval + // algorithm as per the spec for the two specific algorithms. + if err = checkRoundRobinRPCs(ctx, testServiceClient, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + }) + } +} + +// TestNoopConfiguration tests the Outlier Detection Balancer configured with a +// noop configuration. The noop configuration should cause the Outlier Detection +// Balancer to not count RPC's, and thus never eject any upstreams and continue +// to route to every upstream connected to, even if they continuously error. +// Once the Outlier Detection Balancer gets reconfigured with configuration +// requiring counting RPC's, the Outlier Detection Balancer should start +// ejecting any upstreams as specified in the configuration. 
+func (s) TestNoopConfiguration(t *testing.T) { + addresses, cancel := setupBackends(t) + defer cancel() + + mr := manual.NewBuilderWithScheme("od-e2e") + defer mr.Close() + + noopODServiceConfigJSON := ` +{ + "loadBalancingConfig": [ + { + "outlier_detection_experimental": { + "interval": "0.050s", + "baseEjectionTime": "0.100s", + "maxEjectionTime": "300s", + "maxEjectionPercent": 33, + "childPolicy": [{"round_robin": {}}] + } + } + ] +}` + sc := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(noopODServiceConfigJSON) + // The full list of addresses. + fullAddresses := []resolver.Address{ + {Addr: addresses[0]}, + {Addr: addresses[1]}, + {Addr: addresses[2]}, + } + mr.InitialState(resolver.State{ + Addresses: fullAddresses, + ServiceConfig: sc, + }) + cc, err := grpc.Dial(mr.Scheme()+":///", grpc.WithResolvers(mr), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("grpc.Dial() failed: %v", err) + } + defer cc.Close() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + testServiceClient := testgrpc.NewTestServiceClient(cc) + + for i := 0; i < 2; i++ { + // Since the Outlier Detection Balancer starts with a noop + // configuration, it shouldn't count RPCs or eject any upstreams. Thus, + // even though an upstream it connects to constantly errors, it should + // continue to Round Robin across every upstream. + if err := checkRoundRobinRPCs(ctx, testServiceClient, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + } + + // Reconfigure the Outlier Detection Balancer with a configuration that + // specifies to count RPC's and eject upstreams. Due to the balancer no + // longer being a noop, it should eject any unhealthy addresses as specified + // by the failure percentage portion of the configuration. 
+ countingODServiceConfigJSON := ` +{ + "loadBalancingConfig": [ + { + "outlier_detection_experimental": { + "interval": "0.050s", + "baseEjectionTime": "0.100s", + "maxEjectionTime": "300s", + "maxEjectionPercent": 33, + "failurePercentageEjection": { + "threshold": 50, + "enforcementPercentage": 100, + "minimumHosts": 3, + "requestVolume": 5 + }, + "childPolicy": [{"round_robin": {}}] + } + } + ] +}` + sc = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(countingODServiceConfigJSON) + + mr.UpdateState(resolver.State{ + Addresses: fullAddresses, + ServiceConfig: sc, + }) + + // At first on the reconfigured balancer, the balancer has no stats + // collected about upstreams. Thus, it should at first route across the full + // upstream list. + if err = checkRoundRobinRPCs(ctx, testServiceClient, fullAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } + + // The addresses which don't return errors. + okAddresses := []resolver.Address{ + {Addr: addresses[0]}, + {Addr: addresses[1]}, + } + // Now that the reconfigured balancer has data about the failing upstream, + // it should eject the upstream and only route across the two healthy + // upstreams. + if err = checkRoundRobinRPCs(ctx, testServiceClient, okAddresses); err != nil { + t.Fatalf("error in expected round robin: %v", err) + } +} diff --git a/xds/internal/balancer/lrs/logging.go b/xds/internal/balancer/outlierdetection/logging.go similarity index 81% rename from xds/internal/balancer/lrs/logging.go rename to xds/internal/balancer/outlierdetection/logging.go index 602dac099597..705b0cb6918d 100644 --- a/xds/internal/balancer/lrs/logging.go +++ b/xds/internal/balancer/outlierdetection/logging.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2022 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,7 +16,7 @@ * */ -package lrs +package outlierdetection import ( "fmt" @@ -25,10 +25,10 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const prefix = "[lrs-lb %p] " +const prefix = "[outlier-detection-lb %p] " var logger = grpclog.Component("xds") -func prefixLogger(p *lrsBalancer) *internalgrpclog.PrefixLogger { +func prefixLogger(p *outlierDetectionBalancer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) } diff --git a/xds/internal/balancer/outlierdetection/subconn_wrapper.go b/xds/internal/balancer/outlierdetection/subconn_wrapper.go new file mode 100644 index 000000000000..71a996f29ae0 --- /dev/null +++ b/xds/internal/balancer/outlierdetection/subconn_wrapper.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package outlierdetection + +import ( + "fmt" + "unsafe" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/resolver" +) + +// subConnWrapper wraps every created SubConn in the Outlier Detection Balancer, +// to help track the latest state update from the underlying SubConn, and also +// whether or not this SubConn is ejected. +type subConnWrapper struct { + balancer.SubConn + + // addressInfo is a pointer to the subConnWrapper's corresponding address + // map entry, if the map entry exists. 
+ addressInfo unsafe.Pointer // *addressInfo + // These two pieces of state will reach eventual consistency due to sync in + // run(), and child will always have the correctly updated SubConnState. + // latestState is the latest state update from the underlying SubConn. This + // is used whenever a SubConn gets unejected. + latestState balancer.SubConnState + ejected bool + + scUpdateCh *buffer.Unbounded + + // addresses is the list of address(es) this SubConn was created with to + // help support any change in address(es) + addresses []resolver.Address +} + +// eject causes the wrapper to report a state update with the TRANSIENT_FAILURE +// state, and to stop passing along updates from the underlying subchannel. +func (scw *subConnWrapper) eject() { + scw.scUpdateCh.Put(&ejectionUpdate{ + scw: scw, + isEjected: true, + }) +} + +// uneject causes the wrapper to report a state update with the latest update +// from the underlying subchannel, and resume passing along updates from the +// underlying subchannel. 
+func (scw *subConnWrapper) uneject() { + scw.scUpdateCh.Put(&ejectionUpdate{ + scw: scw, + isEjected: false, + }) +} + +func (scw *subConnWrapper) String() string { + return fmt.Sprintf("%+v", scw.addresses) +} diff --git a/xds/internal/balancer/priority/balancer.go b/xds/internal/balancer/priority/balancer.go index 6c4ff08378ec..40c047d558b7 100644 --- a/xds/internal/balancer/priority/balancer.go +++ b/xds/internal/balancer/priority/balancer.go @@ -24,47 +24,55 @@ package priority import ( + "encoding/json" "fmt" "sync" "time" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/hierarchy" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" + "google.golang.org/grpc/serviceconfig" ) -const priorityBalancerName = "priority_experimental" +// Name is the name of the priority balancer. 
+const Name = "priority_experimental" func init() { - balancer.Register(priorityBB{}) + balancer.Register(bb{}) } -type priorityBB struct{} +type bb struct{} -func (priorityBB) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { b := &priorityBalancer{ cc: cc, done: grpcsync.NewEvent(), - childToPriority: make(map[string]int), children: make(map[string]*childBalancer), childBalancerStateUpdate: buffer.NewUnbounded(), } b.logger = prefixLogger(b) - b.bg = balancergroup.New(cc, bOpts, b, nil, b.logger) + b.bg = balancergroup.New(cc, bOpts, b, b.logger) b.bg.Start() go b.run() b.logger.Infof("Created") return b +} +func (b bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(s) } -func (priorityBB) Name() string { - return priorityBalancerName +func (bb) Name() string { + return Name } // timerWrapper wraps a timer with a boolean. So that when a race happens @@ -83,33 +91,28 @@ type priorityBalancer struct { mu sync.Mutex childInUse string - // priority of the child that's current in use. Int starting from 0, and 0 - // is the higher priority. - priorityInUse int // priorities is a list of child names from higher to lower priority. priorities []string - // childToPriority is a map from the child name to it's priority. Priority - // is an int start from 0, and 0 is the higher priority. - childToPriority map[string]int // children is a map from child name to sub-balancers. children map[string]*childBalancer - // The timer to give a priority some time to connect. And if the priority - // doesn't go into Ready/Failure, the next priority will be started. - // - // One timer is enough because there can be at most one priority in init - // state. - priorityInitTimer *timerWrapper + + // Set during UpdateClientConnState when calling into sub-balancers. 
+ // Prevents child updates from recomputing the active priority or sending + // an update of the aggregated picker to the parent. Cleared after all + // sub-balancers have finished UpdateClientConnState, after which + // syncPriority is called manually. + inhibitPickerUpdates bool } func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - newConfig, ok := s.BalancerConfig.(*lbConfig) + b.logger.Debugf("Received an update with balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) } addressesSplit := hierarchy.Group(s.ResolverState.Addresses) b.mu.Lock() - defer b.mu.Unlock() // Create and remove children, since we know all children from the config // are used by some priority. for name, newSubConfig := range newConfig.Children { @@ -124,8 +127,8 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // This is a new child, add it to the children list. But note that // the balancer isn't built, because this child can be a low // priority. If necessary, it will be built when syncing priorities. - cb := newChildBalancer(name, b, bb) - cb.updateConfig(newSubConfig.Config.Config, resolver.State{ + cb := newChildBalancer(name, b, bb.Name(), b.cc) + cb.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, @@ -138,37 +141,56 @@ func (b *priorityBalancer) UpdateClientConnState(s balancer.ClientConnState) err // The balancing policy name is changed, close the old child. But don't // rebuild, rebuild will happen when syncing priorities. 
- if currentChild.bb.Name() != bb.Name() { + if currentChild.balancerName != bb.Name() { currentChild.stop() - currentChild.bb = bb + currentChild.updateBalancerName(bb.Name()) } // Update config and address, but note that this doesn't send the - // updates to child balancer (the child balancer might not be built, if - // it's a low priority). - currentChild.updateConfig(newSubConfig.Config.Config, resolver.State{ + // updates to non-started child balancers (the child balancer might not + // be built, if it's a low priority). + currentChild.updateConfig(newSubConfig, resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, Attributes: s.ResolverState.Attributes, }) } - - // Remove child from children if it's not in new config. + // Cleanup resources used by children removed from the config. for name, oldChild := range b.children { if _, ok := newConfig.Children[name]; !ok { oldChild.stop() + delete(b.children, name) } } // Update priorities and handle priority changes. b.priorities = newConfig.Priorities - b.childToPriority = make(map[string]int, len(newConfig.Priorities)) - for pi, pName := range newConfig.Priorities { - b.childToPriority[pName] = pi + + // Everything was removed by the update. + if len(b.priorities) == 0 { + b.childInUse = "" + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(ErrAllPrioritiesRemoved), + }) + b.mu.Unlock() + return nil } - // Sync the states of all children to the new updated priorities. This - // include starting/stopping child balancers when necessary. - b.syncPriority() + + // This will sync the states of all children to the new updated + // priorities. Includes starting/stopping child balancers when necessary. 
+ // Block picker updates until all children have had a chance to call + // UpdateState to prevent races where, e.g., the active priority reports + // transient failure but a higher priority may have reported something that + // made it active, and if the transient failure update is handled first, + // RPCs could fail. + b.inhibitPickerUpdates = true + // Add an item to queue to notify us when the current items in the queue + // are done and syncPriority has been called. + done := make(chan struct{}) + b.childBalancerStateUpdate.Put(resumePickerUpdates{done: done}) + b.mu.Unlock() + <-done return nil } @@ -183,6 +205,7 @@ func (b *priorityBalancer) UpdateSubConnState(sc balancer.SubConn, state balance func (b *priorityBalancer) Close() { b.bg.Close() + b.childBalancerStateUpdate.Close() b.mu.Lock() defer b.mu.Unlock() @@ -190,27 +213,21 @@ func (b *priorityBalancer) Close() { // Clear states of the current child in use, so if there's a race in picker // update, it will be dropped. b.childInUse = "" - b.stopPriorityInitTimer() + // Stop the child policies, this is necessary to stop the init timers in the + // children. + for _, child := range b.children { + child.stop() + } } -// stopPriorityInitTimer stops the priorityInitTimer if it's not nil, and set it -// to nil. -// -// Caller must hold b.mu. -func (b *priorityBalancer) stopPriorityInitTimer() { - timerW := b.priorityInitTimer - if timerW == nil { - return - } - b.priorityInitTimer = nil - timerW.stopped = true - timerW.timer.Stop() +func (b *priorityBalancer) ExitIdle() { + b.bg.ExitIdle() } // UpdateState implements balancergroup.BalancerStateAggregator interface. The // balancer group sends new connectivity state and picker here. 
func (b *priorityBalancer) UpdateState(childName string, state balancer.State) { - b.childBalancerStateUpdate.Put(&childBalancerState{ + b.childBalancerStateUpdate.Put(childBalancerState{ name: childName, s: state, }) @@ -221,19 +238,37 @@ type childBalancerState struct { s balancer.State } +type resumePickerUpdates struct { + done chan struct{} +} + // run handles child update in a separate goroutine, so if the child sends // updates inline (when called by parent), it won't cause deadlocks (by trying // to hold the same mutex). func (b *priorityBalancer) run() { for { select { - case u := <-b.childBalancerStateUpdate.Get(): + case u, ok := <-b.childBalancerStateUpdate.Get(): + if !ok { + return + } b.childBalancerStateUpdate.Load() - s := u.(*childBalancerState) // Needs to handle state update in a goroutine, because each state // update needs to start/close child policy, could result in // deadlock. - b.handleChildStateUpdate(s.name, s.s) + b.mu.Lock() + if b.done.HasFired() { + return + } + switch s := u.(type) { + case childBalancerState: + b.handleChildStateUpdate(s.name, s.s) + case resumePickerUpdates: + b.inhibitPickerUpdates = false + b.syncPriority(b.childInUse) + close(s.done) + } + b.mu.Unlock() case <-b.done.Done(): return } diff --git a/xds/internal/balancer/priority/balancer_child.go b/xds/internal/balancer/priority/balancer_child.go index d012ad4e4593..7e8ccbd335e9 100644 --- a/xds/internal/balancer/priority/balancer_child.go +++ b/xds/internal/balancer/priority/balancer_child.go @@ -19,6 +19,8 @@ package priority import ( + "time" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" @@ -27,25 +29,40 @@ import ( ) type childBalancer struct { - name string - parent *priorityBalancer - bb balancer.Builder + name string + parent *priorityBalancer + parentCC balancer.ClientConn + balancerName string + cc *ignoreResolveNowClientConn - config serviceconfig.LoadBalancingConfig - rState 
resolver.State + ignoreReresolutionRequests bool + config serviceconfig.LoadBalancingConfig + rState resolver.State started bool - state balancer.State + // This is set when the child reports TransientFailure, and unset when it + // reports Ready or Idle. It is used to decide whether the failover timer + // should start when the child is transitioning into Connecting. The timer + // will be restarted if the child has not reported TF more recently than it + // reported Ready or Idle. + reportedTF bool + // The latest state the child balancer provided. + state balancer.State + // The timer to give a priority some time to connect. And if the priority + // doesn't go into Ready/Failure, the next priority will be started. + initTimer *timerWrapper } // newChildBalancer creates a child balancer place holder, but doesn't // build/start the child balancer. -func newChildBalancer(name string, parent *priorityBalancer, bb balancer.Builder) *childBalancer { +func newChildBalancer(name string, parent *priorityBalancer, balancerName string, cc balancer.ClientConn) *childBalancer { return &childBalancer{ - name: name, - parent: parent, - bb: bb, - started: false, + name: name, + parent: parent, + parentCC: cc, + balancerName: balancerName, + cc: newIgnoreResolveNowClientConn(cc, false), + started: false, // Start with the connecting state and picker with re-pick error, so // that when a priority switch causes this child picked before it's // balancing policy is created, a re-pick will happen. @@ -56,11 +73,24 @@ func newChildBalancer(name string, parent *priorityBalancer, bb balancer.Builder } } +// updateBalancerName updates balancer name for the child, but doesn't build a +// new one. The parent priority LB always closes the child policy before +// updating the balancer name, and the new balancer is built when it gets added +// to the balancergroup as part of start(). 
+func (cb *childBalancer) updateBalancerName(balancerName string) { + cb.balancerName = balancerName + cb.cc = newIgnoreResolveNowClientConn(cb.parentCC, cb.ignoreReresolutionRequests) +} + // updateConfig sets childBalancer's config and state, but doesn't send update to -// the child balancer. -func (cb *childBalancer) updateConfig(config serviceconfig.LoadBalancingConfig, rState resolver.State) { - cb.config = config +// the child balancer unless it is started. +func (cb *childBalancer) updateConfig(child *Child, rState resolver.State) { + cb.ignoreReresolutionRequests = child.IgnoreReresolutionRequests + cb.config = child.Config.Config cb.rState = rState + if cb.started { + cb.sendUpdate() + } } // start builds the child balancer if it's not already started. @@ -71,18 +101,21 @@ func (cb *childBalancer) start() { return } cb.started = true - cb.parent.bg.Add(cb.name, cb.bb) + cb.parent.bg.AddWithClientConn(cb.name, cb.balancerName, cb.cc) + cb.startInitTimer() + cb.sendUpdate() } // sendUpdate sends the addresses and config to the child balancer. func (cb *childBalancer) sendUpdate() { + cb.cc.updateIgnoreResolveNow(cb.ignoreReresolutionRequests) // TODO: return and aggregate the returned error in the parent. err := cb.parent.bg.UpdateClientConnState(cb.name, balancer.ClientConnState{ ResolverState: cb.rState, BalancerConfig: cb.config, }) if err != nil { - cb.parent.logger.Warningf("failed to update ClientConn state for child %v: %v", cb.name, err) + cb.parent.logger.Warningf("Failed to update state for child policy %q: %v", cb.name, err) } } @@ -95,10 +128,46 @@ func (cb *childBalancer) stop() { if !cb.started { return } + cb.stopInitTimer() cb.parent.bg.Remove(cb.name) cb.started = false cb.state = balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), } + // Clear child.reportedTF, so that if this child is started later, it will + // be given time to connect. 
+ cb.reportedTF = false +} + +func (cb *childBalancer) startInitTimer() { + if cb.initTimer != nil { + return + } + // Need this local variable to capture timerW in the AfterFunc closure + // to check the stopped boolean. + timerW := &timerWrapper{} + cb.initTimer = timerW + timerW.timer = time.AfterFunc(DefaultPriorityInitTimeout, func() { + cb.parent.mu.Lock() + defer cb.parent.mu.Unlock() + if timerW.stopped { + return + } + cb.initTimer = nil + // Re-sync the priority. This will switch to the next priority if + // there's any. Note that it's important sync() is called after setting + // initTimer to nil. + cb.parent.syncPriority("") + }) +} + +func (cb *childBalancer) stopInitTimer() { + timerW := cb.initTimer + if timerW == nil { + return + } + cb.initTimer = nil + timerW.stopped = true + timerW.timer.Stop() } diff --git a/xds/internal/balancer/priority/balancer_priority.go b/xds/internal/balancer/priority/balancer_priority.go index ea2f4f04184c..4655bf418474 100644 --- a/xds/internal/balancer/priority/balancer_priority.go +++ b/xds/internal/balancer/priority/balancer_priority.go @@ -23,77 +23,89 @@ import ( "time" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/connectivity" ) var ( - errAllPrioritiesRemoved = errors.New("no locality is provided, all priorities are removed") - defaultPriorityInitTimeout = 10 * time.Second + // ErrAllPrioritiesRemoved is returned by the picker when there's no priority available. + ErrAllPrioritiesRemoved = errors.New("no priority is provided, all priorities are removed") + // DefaultPriorityInitTimeout is the timeout after which if a priority is + // not READY, the next will be started. It's exported to be overridden by + // tests. + DefaultPriorityInitTimeout = 10 * time.Second ) -// syncPriority handles priority after a config update. 
It makes sure the -// balancer state (started or not) is in sync with the priorities (even in -// tricky cases where a child is moved from a priority to another). +// syncPriority handles priority after a config update or a child balancer +// connectivity state update. It makes sure the balancer state (started or not) +// is in sync with the priorities (even in tricky cases where a child is moved +// from a priority to another). // // It's guaranteed that after this function returns: -// - If some child is READY, it is childInUse, and all lower priorities are -// closed. -// - If some child is newly started(in Connecting for the first time), it is -// childInUse, and all lower priorities are closed. -// - Otherwise, the lowest priority is childInUse (none of the children is -// ready, and the overall state is not ready). +// +// If some child is READY, it is childInUse, and all lower priorities are +// closed. +// +// If some child is newly started(in Connecting for the first time), it is +// childInUse, and all lower priorities are closed. +// +// Otherwise, the lowest priority is childInUse (none of the children is +// ready, and the overall state is not ready). // // Steps: -// - If all priorities were deleted, unset childInUse (to an empty string), and -// set parent ClientConn to TransientFailure -// - Otherwise, Scan all children from p0, and check balancer stats: -// - For any of the following cases: -// - If balancer is not started (not built), this is either a new child -// with high priority, or a new builder for an existing child. -// - If balancer is READY -// - If this is the lowest priority -// - do the following: -// - if this is not the old childInUse, override picker so old picker is no -// longer used. 
-// - switch to it (because all higher priorities are neither new or Ready) -// - forward the new addresses and config +// +// If all priorities were deleted, unset childInUse (to an empty string), and +// set parent ClientConn to TransientFailure +// +// Otherwise, Scan all children from p0, and check balancer stats: +// +// For any of the following cases: +// +// If balancer is not started (not built), this is either a new child with +// high priority, or a new builder for an existing child. +// +// If balancer is Connecting and has non-nil initTimer (meaning it +// transitioned from Ready or Idle to connecting, not from TF, so we +// should give it init-time to connect). +// +// If balancer is READY or IDLE +// +// If this is the lowest priority +// +// do the following: +// +// if this is not the old childInUse, override picker so old picker is no +// longer used. +// +// switch to it (because all higher priorities are neither new or Ready) +// +// forward the new addresses and config // // Caller must hold b.mu. -func (b *priorityBalancer) syncPriority() { - // Everything was removed by the update. - if len(b.priorities) == 0 { - b.childInUse = "" - b.priorityInUse = 0 - // Stop the init timer. This can happen if the only priority is removed - // shortly after it's added. 
- b.stopPriorityInitTimer() - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: base.NewErrPicker(errAllPrioritiesRemoved), - }) +func (b *priorityBalancer) syncPriority(childUpdating string) { + if b.inhibitPickerUpdates { + b.logger.Debugf("Skipping update from child policy %q", childUpdating) return } - for p, name := range b.priorities { child, ok := b.children[name] if !ok { - b.logger.Errorf("child with name %q is not found in children", name) + b.logger.Warningf("Priority name %q is not found in list of child policies", name) continue } if !child.started || child.state.ConnectivityState == connectivity.Ready || + child.state.ConnectivityState == connectivity.Idle || + (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || p == len(b.priorities)-1 { - if b.childInUse != "" && b.childInUse != child.name { - // childInUse was set and is different from this child, will - // change childInUse later. We need to update picker here - // immediately so parent stops using the old picker. + if b.childInUse != child.name || child.name == childUpdating { + b.logger.Debugf("childInUse, childUpdating: %q, %q", b.childInUse, child.name) + // If we switch children or the child in use just updated its + // picker, push the child's picker to the parent. 
b.cc.UpdateState(child.state) } - b.logger.Infof("switching to (%q, %v) in syncPriority", child.name, p) + b.logger.Debugf("Switching to (%q, %v) in syncPriority", child.name, p) b.switchToChild(child, p) - child.sendUpdate() break } } @@ -107,7 +119,7 @@ func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) { name := b.priorities[i] child, ok := b.children[name] if !ok { - b.logger.Errorf("child with name %q is not found in children", name) + b.logger.Warningf("Priority name %q is not found in list of child policies", name) continue } child.stop() @@ -118,8 +130,7 @@ func (b *priorityBalancer) stopSubBalancersLowerThanPriority(p int) { // - stop all child with lower priorities // - if childInUse is not this child // - set childInUse to this child -// - stops init timer -// - if this child is not started, start it, and start a init timer +// - if this child is not started, start it // // Note that it does NOT send the current child state (picker) to the parent // ClientConn. The caller needs to send it if necessary. @@ -149,206 +160,45 @@ func (b *priorityBalancer) switchToChild(child *childBalancer, priority int) { return } b.childInUse = child.name - b.priorityInUse = priority - - // Init timer is always for childInUse. Since we are switching to a - // different child, we will stop the init timer no matter what. If this - // child is not started, we will start the init timer later. - b.stopPriorityInitTimer() if !child.started { child.start() - // Need this local variable to capture timerW in the AfterFunc closure - // to check the stopped boolean. - timerW := &timerWrapper{} - b.priorityInitTimer = timerW - timerW.timer = time.AfterFunc(defaultPriorityInitTimeout, func() { - b.mu.Lock() - defer b.mu.Unlock() - if timerW.stopped { - return - } - b.priorityInitTimer = nil - // Switch to the next priority if there's any. 
- if pNext := priority + 1; pNext < len(b.priorities) { - nameNext := b.priorities[pNext] - if childNext, ok := b.children[nameNext]; ok { - b.switchToChild(childNext, pNext) - childNext.sendUpdate() - } - } - }) } } // handleChildStateUpdate start/close priorities based on the connectivity // state. func (b *priorityBalancer) handleChildStateUpdate(childName string, s balancer.State) { - b.mu.Lock() - defer b.mu.Unlock() - if b.done.HasFired() { - return - } - - priority, ok := b.childToPriority[childName] - if !ok { - b.logger.Errorf("priority: received picker update with unknown child %v", childName) - return - } - - if b.childInUse == "" { - b.logger.Errorf("priority: no child is in use when picker update is received") - return - } - - // priorityInUse is higher than this priority. - if b.priorityInUse < priority { - // Lower priorities should all be closed, this is an unexpected update. - // Can happen if the child policy sends an update after we tell it to - // close. - b.logger.Warningf("priority: received picker update from priority %v, lower than priority in use %v", priority, b.priorityInUse) - return - } - // Update state in child. The updated picker will be sent to parent later if // necessary. child, ok := b.children[childName] if !ok { - b.logger.Errorf("priority: child balancer not found for child %v, priority %v", childName, priority) + b.logger.Warningf("Child policy not found for %q", childName) + return + } + if !child.started { + b.logger.Warningf("Ignoring update from child policy %q which is not in started state: %+v", childName, s) return } - oldState := child.state.ConnectivityState child.state = s + // We start/stop the init timer of this child based on the new connectivity + // state. syncPriority() later will need the init timer (to check if it's + // nil or not) to decide which child to switch to. 
switch s.ConnectivityState { - case connectivity.Ready: - b.handlePriorityWithNewStateReady(child, priority) + case connectivity.Ready, connectivity.Idle: + child.reportedTF = false + child.stopInitTimer() case connectivity.TransientFailure: - b.handlePriorityWithNewStateTransientFailure(child, priority) + child.reportedTF = true + child.stopInitTimer() case connectivity.Connecting: - b.handlePriorityWithNewStateConnecting(child, priority, oldState) - default: - // New state is Idle, should never happen. Don't forward. - } -} - -// handlePriorityWithNewStateReady handles state Ready from a higher or equal -// priority. -// -// An update with state Ready: -// - If it's from higher priority: -// - Switch to this priority -// - Forward the update -// - If it's from priorityInUse: -// - Forward only -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateReady(child *childBalancer, priority int) { - // If one priority higher or equal to priorityInUse goes Ready, stop the - // init timer. If update is from higher than priorityInUse, priorityInUse - // will be closed, and the init timer will become useless. - b.stopPriorityInitTimer() - - // priorityInUse is lower than this priority, switch to this. - if b.priorityInUse > priority { - b.logger.Infof("Switching priority from %v to %v, because latter became Ready", b.priorityInUse, priority) - b.switchToChild(child, priority) - } - // Forward the update since it's READY. - b.cc.UpdateState(child.state) -} - -// handlePriorityWithNewStateTransientFailure handles state TransientFailure -// from a higher or equal priority. 
-// -// An update with state TransientFailure: -// - If it's from a higher priority: -// - Do not forward, and do nothing -// - If it's from priorityInUse: -// - If there's no lower: -// - Forward and do nothing else -// - If there's a lower priority: -// - Switch to the lower -// - Forward the lower child's state -// - Do NOT forward this update -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateTransientFailure(child *childBalancer, priority int) { - // priorityInUse is lower than this priority, do nothing. - if b.priorityInUse > priority { - return - } - // priorityInUse sends a failure. Stop its init timer. - b.stopPriorityInitTimer() - priorityNext := priority + 1 - if priorityNext >= len(b.priorities) { - // Forward this update. - b.cc.UpdateState(child.state) - return - } - b.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, priorityNext) - nameNext := b.priorities[priorityNext] - childNext := b.children[nameNext] - b.switchToChild(childNext, priorityNext) - b.cc.UpdateState(childNext.state) - childNext.sendUpdate() -} - -// handlePriorityWithNewStateConnecting handles state Connecting from a higher -// than or equal priority. -// -// An update with state Connecting: -// - If it's from a higher priority -// - Do nothing -// - If it's from priorityInUse, the behavior depends on previous state. -// -// When new state is Connecting, the behavior depends on previous state. If the -// previous state was Ready, this is a transition out from Ready to Connecting. -// Assuming there are multiple backends in the same priority, this mean we are -// in a bad situation and we should failover to the next priority (Side note: -// the current connectivity state aggregating algorithm (e.g. 
round-robin) is -// not handling this right, because if many backends all go from Ready to -// Connecting, the overall situation is more like TransientFailure, not -// Connecting). -// -// If the previous state was Idle, we don't do anything special with failure, -// and simply forward the update. The init timer should be in process, will -// handle failover if it timeouts. If the previous state was TransientFailure, -// we do not forward, because the lower priority is in use. -// -// Caller must make sure priorityInUse is not higher than priority. -// -// Caller must hold mu. -func (b *priorityBalancer) handlePriorityWithNewStateConnecting(child *childBalancer, priority int, oldState connectivity.State) { - // priorityInUse is lower than this priority, do nothing. - if b.priorityInUse > priority { - return - } - - switch oldState { - case connectivity.Ready: - // Handling transition from Ready to Connecting, is same as handling - // TransientFailure. There's no need to stop the init timer, because it - // should have been stopped when state turned Ready. - priorityNext := priority + 1 - if priorityNext >= len(b.priorities) { - // Forward this update. - b.cc.UpdateState(child.state) - return + if !child.reportedTF { + child.startInitTimer() } - b.logger.Infof("Switching priority from %v to %v, because former became TransientFailure", priority, priorityNext) - nameNext := b.priorities[priorityNext] - childNext := b.children[nameNext] - b.switchToChild(childNext, priorityNext) - b.cc.UpdateState(childNext.state) - childNext.sendUpdate() - case connectivity.Idle: - b.cc.UpdateState(child.state) default: - // Old state is Connecting, TransientFailure or Shutdown. Don't forward. + // New state is Shutdown, should never happen. Don't forward. 
} + + child.parent.syncPriority(childName) } diff --git a/xds/internal/balancer/priority/balancer_test.go b/xds/internal/balancer/priority/balancer_test.go index be14231dcb3f..22ecca84bf25 100644 --- a/xds/internal/balancer/priority/balancer_test.go +++ b/xds/internal/balancer/priority/balancer_test.go @@ -19,6 +19,7 @@ package priority import ( + "context" "fmt" "testing" "time" @@ -28,12 +29,17 @@ import ( "google.golang.org/grpc/balancer/roundrobin" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/balancergroup" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/hierarchy" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/testutils" +) + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 100 * time.Millisecond ) type s struct { @@ -67,23 +73,16 @@ func init() { balancer.Register(&anotherRR{Builder: balancer.Get(roundrobin.Name)}) } -func subConnFromPicker(t *testing.T, p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, err := p.Pick(balancer.PickInfo{}) - if err != nil { - t.Fatalf("unexpected error from picker.Pick: %v", err) - } - return scst.SubConn - } -} - // When a high priority is ready, adding/removing lower locality doesn't cause // changes. // // Init 0 and 1; 0 is up, use 0; add 2, use 0; remove 2, use 0. 
func (s) TestPriority_HighPriorityReady(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -95,10 +94,10 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -117,10 +116,8 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Add p2, it shouldn't cause any updates. 
@@ -132,11 +129,11 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1", "child-2"}, }, @@ -145,8 +142,6 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { } select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case sc := <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn: %s", sc) case sc := <-cc.RemoveSubConnCh: @@ -154,6 +149,11 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { case <-time.After(time.Millisecond * 100): } + // Test roundrobin with only p0 subconns. + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) + } + // Remove p2, no updates. 
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ @@ -162,10 +162,10 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -174,14 +174,17 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { } select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn") case <-cc.RemoveSubConnCh: t.Fatalf("got unexpected remove SubConn") case <-time.After(time.Millisecond * 100): } + + // Test roundrobin with only p0 subconns. + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) + } } // Lower priority is used when higher priority is not ready. @@ -189,12 +192,15 @@ func (s) TestPriority_HighPriorityReady(t *testing.T) { // Init 0 and 1; 0 is up, use 0; 0 is down, 1 is up, use 1; add 2, use 1; 1 is // down, use 2; remove 2, use 1. func (s) TestPriority_SwitchPriority(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() - // Two localities, with priorities [0, 1], each with one backend. 
+ t.Log("Two localities, with priorities [0, 1], each with one backend.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: []resolver.Address{ @@ -202,10 +208,10 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -219,30 +225,24 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { } sc0 := <-cc.NewSubConnCh - // p0 is ready. + t.Log("Make p0 ready.") pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { + t.Fatal(err.Error()) } - // Turn down 0, will start and use 1. + t.Log("Turn down 0, will start and use 1.") pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) - // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. 
- p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } - // Handle SubConn creation from 1. + t.Log("Handle SubConn creation from 1.") addrs1 := <-cc.NewSubConnAddrsCh if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want { t.Fatalf("sc is created with addr %v, want %v", got, want) @@ -252,15 +252,11 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } - // Add p2, it shouldn't cause any udpates. 
+ t.Log("Add p2, it shouldn't cause any updates.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: []resolver.Address{ @@ -269,11 +265,11 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1", "child-2"}, }, @@ -282,8 +278,6 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { } select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case sc := <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn, %s", sc) case <-cc.RemoveSubConnCh: @@ -291,16 +285,13 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { case <-time.After(time.Millisecond * 100): } - // Turn down 1, use 2 + t.Log("Turn down 1, use 2.") pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 2 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. 
- p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p3.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs2 := <-cc.NewSubConnAddrsCh @@ -312,15 +303,11 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. - p4 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p4.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } - // Remove 2, use 1. + t.Log("Remove 2, use 1.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: []resolver.Address{ @@ -328,10 +315,10 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -347,22 +334,18 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { // Should get an update with 1's old transient failure picker, to override // 2's old picker. 
- p5 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p5.Pick(balancer.PickInfo{}); err == nil { - t.Fatalf("want pick error non-nil, got nil") - } + if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatal(err.Error()) } + <-cc.NewStateCh // Drain to match picker pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // Does not change the aggregate state, because round robin does not leave + // TRANSIENT_FAILURE if a subconn goes CONNECTING. pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - p6 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p6.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } } @@ -372,8 +355,11 @@ func (s) TestPriority_SwitchPriority(t *testing.T) { // Init 0 and 1; 0 is up, use 0; 0 is connecting, 1 is up, use 1; 0 is ready, // use 0. 
func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -385,10 +371,10 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -407,23 +393,17 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { + t.Fatal(err.Error()) } - // Turn 0 to Connecting, will start and use 1. Because 0 changing from Ready - // to Connecting is a failure. - pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // Turn 0 to TransientFailure, will start and use 1. + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. 
- p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } // Handle SubConn creation from 1. @@ -436,12 +416,8 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Turn 0 back to Ready. @@ -454,12 +430,8 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { t.Fatalf("RemoveSubConn, want %v, got %v", sc0, scToRemove) } - p3 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p3.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { + t.Fatal(err.Error()) } } @@ -467,8 +439,11 @@ func (s) TestPriority_HighPriorityToConnectingFromReady(t *testing.T) { // // Init 0 and 1; 0 and 1 both down; add 2, use 2. 
func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -480,10 +455,10 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -497,16 +472,13 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { } sc0 := <-cc.NewSubConnCh - // Turn down 0, 1 is used. + t.Log("Turn down 0, 1 is used.") pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - pFail0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail0.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs1 := <-cc.NewSubConnAddrsCh @@ -514,18 +486,17 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { t.Fatalf("sc is created with addr %v, want %v", got, want) } sc1 := <-cc.NewSubConnCh - // Turn down 1, pick should error. 
+ + t.Log("Turn down 1, pick should error.") pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Test pick failure. - pFail1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail1.Pick(balancer.PickInfo{}); err == nil { - t.Fatalf("want pick error non-nil, got nil") - } + if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatal(err.Error()) } + <-cc.NewStateCh // Drain to match picker - // Add p2, it should create a new SubConn. + t.Log("Add p2, it should create a new SubConn.") if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{ Addresses: []resolver.Address{ @@ -534,11 +505,11 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1", "child-2"}, }, @@ -547,11 +518,8 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { } // A new connecting picker should be updated for the new priority. 
- p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := p0.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs2 := <-cc.NewSubConnAddrsCh @@ -563,12 +531,8 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } } @@ -576,10 +540,11 @@ func (s) TestPriority_HigherDownWhileAddingLower(t *testing.T) { // // Init 0,1,2; 0 and 1 down, use 2; 0 up, close 1 and 2. 
func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { - // defer time.Sleep(10 * time.Millisecond) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -592,11 +557,11 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-2"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-2": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-2": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1", "child-2"}, }, @@ -614,11 +579,8 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. 
- pFail0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail0.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs1 := <-cc.NewSubConnAddrsCh @@ -631,11 +593,8 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 2 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - pFail1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs2 := <-cc.NewSubConnAddrsCh @@ -647,12 +606,8 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 2. - p2 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p2.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc2, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc2) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } // When 0 becomes ready, 0 should be used, 1 and 2 should all be closed. @@ -671,12 +626,8 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { } // Test pick with 0. 
- p0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p0.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { + t.Fatal(err.Error()) } } @@ -685,17 +636,20 @@ func (s) TestPriority_HigherReadyCloseAllLower(t *testing.T) { // // Init 0,1; 0 is not ready (in connecting), after timeout, use 1. func (s) TestPriority_InitTimeout(t *testing.T) { - const testPriorityInitTimeout = time.Second + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + const testPriorityInitTimeout = 200 * time.Millisecond defer func() func() { - old := defaultPriorityInitTimeout - defaultPriorityInitTimeout = testPriorityInitTimeout + old := DefaultPriorityInitTimeout + DefaultPriorityInitTimeout = testPriorityInitTimeout return func() { - defaultPriorityInitTimeout = old + DefaultPriorityInitTimeout = old } }()() cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -707,10 +661,10 @@ func (s) TestPriority_InitTimeout(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -740,32 +694,33 @@ func (s) TestPriority_InitTimeout(t *testing.T) { } sc1 := <-cc.NewSubConnCh + // After the init timer of 
p0, when switching to p1, a connecting picker + // will be sent to the parent. Clear it here. pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pick with 1. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } } // EDS removes all priorities, and re-adds them. func (s) TestPriority_RemovesAllPriorities(t *testing.T) { - const testPriorityInitTimeout = time.Second + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + const testPriorityInitTimeout = 200 * time.Millisecond defer func() func() { - old := defaultPriorityInitTimeout - defaultPriorityInitTimeout = testPriorityInitTimeout + old := DefaultPriorityInitTimeout + DefaultPriorityInitTimeout = testPriorityInitTimeout return func() { - defaultPriorityInitTimeout = old + DefaultPriorityInitTimeout = old } }()() cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -777,10 +732,10 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: 
&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -797,10 +752,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc0} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc0); err != nil { + t.Fatal(err.Error()) } // Remove all priorities. @@ -808,7 +761,7 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { ResolverState: resolver.State{ Addresses: nil, }, - BalancerConfig: &lbConfig{ + BalancerConfig: &LBConfig{ Children: nil, Priorities: nil, }, @@ -823,11 +776,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { } // Test pick return TransientFailure. - pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != errAllPrioritiesRemoved { - t.Fatalf("want pick error %v, got %v", errAllPrioritiesRemoved, err) - } + if err := cc.WaitForPickerWithErr(ctx, ErrAllPrioritiesRemoved); err != nil { + t.Fatal(err.Error()) } // Re-add two localities, with previous priorities, but different backends. 
@@ -838,10 +788,10 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[3]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -869,10 +819,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { pb.UpdateSubConnState(sc11, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - p1 := <-cc.NewPickerCh - want = []balancer.SubConn{sc11} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc11); err != nil { + t.Fatal(err.Error()) } // Remove p1, to fallback to p0. @@ -882,9 +830,9 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -899,11 +847,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { } // Test pick return NoSubConn. 
- pFail1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if scst, err := pFail1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error _, %v, got %v, _ ,%v", balancer.ErrNoSubConnAvailable, scst, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } // Send an ready update for the p0 sc that was received when re-adding @@ -912,10 +857,8 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { pb.UpdateSubConnState(sc01, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc01} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc01); err != nil { + t.Fatal(err.Error()) } select { @@ -932,8 +875,11 @@ func (s) TestPriority_RemovesAllPriorities(t *testing.T) { // Test the case where the high priority contains no backends. The low priority // will be used. 
func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -945,10 +891,10 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -967,10 +913,8 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Remove addresses from priority 0, should use p1. 
@@ -980,10 +924,10 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -1004,11 +948,8 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. - pFail1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail1.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } // p1 is ready. @@ -1016,23 +957,21 @@ func (s) TestPriority_HighPriorityNoEndpoints(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - p2 := <-cc.NewPickerCh - want = []balancer.SubConn{sc2} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } } // Test the case where the first and only priority is removed. 
func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { - const testPriorityInitTimeout = time.Second + const testPriorityInitTimeout = 200 * time.Millisecond defer func(t time.Duration) { - defaultPriorityInitTimeout = t - }(defaultPriorityInitTimeout) - defaultPriorityInitTimeout = testPriorityInitTimeout + DefaultPriorityInitTimeout = t + }(DefaultPriorityInitTimeout) + DefaultPriorityInitTimeout = testPriorityInitTimeout cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1043,9 +982,9 @@ func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -1058,7 +997,7 @@ func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { ResolverState: resolver.State{ Addresses: nil, }, - BalancerConfig: &lbConfig{ + BalancerConfig: &LBConfig{ Children: nil, Priorities: nil, }, @@ -1074,8 +1013,11 @@ func (s) TestPriority_FirstPriorityUnavailable(t *testing.T) { // // Init a(p0) and b(p1); a(p0) is up, use a; move b to p0, a to p1, use b. 
func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1087,10 +1029,10 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -1109,10 +1051,8 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. 
- p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Swap child with p0 and p1, the child at lower priority should now be the @@ -1124,10 +1064,10 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-1", "child-0"}, }, @@ -1139,11 +1079,8 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { // balancer should immediately update the picker so the picker from old // child is not used. In this case, the picker becomes a // no-subconn-available picker because this child is just started. - pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } // Old subconn should be removed. @@ -1163,10 +1100,8 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only new subconns. 
- p2 := <-cc.NewPickerCh - want2 := []balancer.SubConn{sc2} - if err := testutils.IsRoundRobin(want2, subConnFromPicker(t, p2)); err != nil { - t.Fatalf("want %v, got %v", want2, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } } @@ -1175,8 +1110,11 @@ func (s) TestPriority_MoveChildToHigherPriority(t *testing.T) { // // Init a(p0) and b(p1); a(p0) is down, use b; move b to p0, a to p1, use b. func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1188,10 +1126,10 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -1209,11 +1147,8 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. 
- pFail0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail0.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs1 := <-cc.NewSubConnAddrsCh @@ -1225,10 +1160,8 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Swap child with p0 and p1, the child at lower priority should now be the @@ -1240,10 +1173,10 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-1", "child-0"}, }, @@ -1260,8 +1193,6 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { // Because this was a ready child moved to a higher priority, no new subconn // or picker should be updated. 
select { - case <-cc.NewPickerCh: - t.Fatalf("got unexpected new picker") case <-cc.NewSubConnCh: t.Fatalf("got unexpected new SubConn") case <-cc.RemoveSubConnCh: @@ -1275,8 +1206,11 @@ func (s) TestPriority_MoveReadyChildToHigherPriority(t *testing.T) { // // Init a(p0) and b(p1); a(p0) is down, use b; move b to p0, a to p1, use b. func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1288,10 +1222,10 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, - "child-1": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0", "child-1"}, }, @@ -1309,11 +1243,8 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs // will retry. 
- pFail0 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail0.Pick(balancer.PickInfo{}); err != balancer.ErrNoSubConnAvailable { - t.Fatalf("want pick error %v, got %v", balancer.ErrNoSubConnAvailable, err) - } + if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) } addrs1 := <-cc.NewSubConnAddrsCh @@ -1325,10 +1256,8 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p1 subconns. - p0 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p0)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Remove child with p1, the child at higher priority should now be used. @@ -1338,9 +1267,9 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -1354,12 +1283,10 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) } - pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err == nil { - t.Fatalf("want pick error , got %v", err) - } + if err := cc.WaitForErrPicker(ctx); err != nil { + t.Fatal(err.Error()) } + <-cc.NewStateCh // Drain to match picker // Because there was no new child, no new subconn should be created. 
select { @@ -1374,6 +1301,9 @@ func (s) TestPriority_RemoveReadyLowestChild(t *testing.T) { // Init 0; 0 is up, use 0; remove 0, only picker is updated, no subconn is // removed; re-add 0, picker is updated. func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + const testChildCacheTimeout = time.Second defer func() func() { old := balancergroup.DefaultSubBalancerCloseTimeout @@ -1384,7 +1314,7 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { }()() cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1395,9 +1325,9 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -1416,26 +1346,21 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. - p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Remove the child, it shouldn't cause any conn changed, but picker should // be different. 
if err := pb.UpdateClientConnState(balancer.ClientConnState{ ResolverState: resolver.State{}, - BalancerConfig: &lbConfig{}, + BalancerConfig: &LBConfig{}, }); err != nil { t.Fatalf("failed to update ClientConn state: %v", err) } - pFail := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - if _, err := pFail.Pick(balancer.PickInfo{}); err != errAllPrioritiesRemoved { - t.Fatalf("want pick error %v, got %v", errAllPrioritiesRemoved, err) - } + if err := cc.WaitForPickerWithErr(ctx, ErrAllPrioritiesRemoved); err != nil { + t.Fatal(err.Error()) } // But no conn changes should happen. Child balancer is in cache. @@ -1454,9 +1379,9 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -1465,10 +1390,8 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { } // Test roundrobin with only p0 subconns. - p2 := <-cc.NewPickerCh - want2 := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want2, subConnFromPicker(t, p2)); err != nil { - t.Fatalf("want %v, got %v", want2, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // But no conn changes should happen. Child balancer is just taken out from @@ -1486,8 +1409,11 @@ func (s) TestPriority_ReadyChildRemovedButInCache(t *testing.T) { // // Init 0; 0 is up, use 0; change 0's policy, 0 is used. 
func (s) TestPriority_ChildPolicyChange(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1498,9 +1424,9 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, }, Priorities: []string{"child-0"}, }, @@ -1519,10 +1445,8 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test roundrobin with only p0 subconns. 
- p1 := <-cc.NewPickerCh - want := []balancer.SubConn{sc1} - if err := testutils.IsRoundRobin(want, subConnFromPicker(t, p1)); err != nil { - t.Fatalf("want %v, got %v", want, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) } // Change the policy for the child (still roundrobin, but with a different @@ -1533,9 +1457,9 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: testRRBalancerName}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: testRRBalancerName}}, }, Priorities: []string{"child-0"}, }, @@ -1559,10 +1483,8 @@ func (s) TestPriority_ChildPolicyChange(t *testing.T) { pb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) // Test pickfirst with the new subconns. - p2 := <-cc.NewPickerCh - want2 := []balancer.SubConn{sc2} - if err := testutils.IsRoundRobin(want2, subConnFromPicker(t, p2)); err != nil { - t.Fatalf("want %v, got %v", want2, err) + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } } @@ -1586,8 +1508,11 @@ func init() { // (e.g., roundrobin handling empty addresses). There could be deadlock caused // by acquiring a locked mutex. 
func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cc := testutils.NewTestClientConn(t) - bb := balancer.Get(priorityBalancerName) + bb := balancer.Get(Name) pb := bb.Build(cc, balancer.BuildOptions{}) defer pb.Close() @@ -1598,9 +1523,56 @@ func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), }, }, - BalancerConfig: &lbConfig{ - Children: map[string]*child{ - "child-0": {&internalserviceconfig.BalancerConfig{Name: inlineUpdateBalancerName}}, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: inlineUpdateBalancerName}}, + }, + Priorities: []string{"child-0"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + if err := cc.WaitForPickerWithErr(ctx, errTestInlineStateUpdate); err != nil { + t.Fatal(err.Error()) + } +} + +// TestPriority_IgnoreReresolutionRequest tests the case where the priority +// policy has a single child policy. The test verifies that ResolveNow() calls +// from the child policy are ignored based on the value of the +// IgnoreReresolutionRequests field in the configuration. +func (s) TestPriority_IgnoreReresolutionRequest(t *testing.T) { + // Register a stub balancer to act the child policy of the priority policy. + // Provide an init function to the stub balancer to capture the ClientConn + // passed to the child policy. 
+ ccCh := testutils.NewChannel() + childPolicyName := t.Name() + stub.Register(childPolicyName, stub.BalancerFuncs{ + Init: func(data *stub.BalancerData) { + ccCh.Send(data.ClientConn) + }, + }) + + cc := testutils.NewTestClientConn(t) + bb := balancer.Get(Name) + pb := bb.Build(cc, balancer.BuildOptions{}) + defer pb.Close() + + // One children, with priorities [0], with one backend, reresolution is + // ignored. + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": { + Config: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, + IgnoreReresolutionRequests: true, + }, }, Priorities: []string{"child-0"}, }, @@ -1608,11 +1580,463 @@ func (s) TestPriority_ChildPolicyUpdatePickerInline(t *testing.T) { t.Fatalf("failed to update ClientConn state: %v", err) } - p0 := <-cc.NewPickerCh + // Retrieve the ClientConn passed to the child policy. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := ccCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout waiting for ClientConn from the child policy") + } + balancerCC := val.(balancer.ClientConn) + + // Since IgnoreReresolutionRequests was set to true, all ResolveNow() calls + // should be ignored. for i := 0; i < 5; i++ { - _, err := p0.Pick(balancer.PickInfo{}) - if err != errTestInlineStateUpdate { - t.Fatalf("picker.Pick, got err %q, want err %q", err, errTestInlineStateUpdate) + balancerCC.ResolveNow(resolver.ResolveNowOptions{}) + } + select { + case <-cc.ResolveNowCh: + t.Fatalf("got unexpected ResolveNow() call") + case <-time.After(defaultTestShortTimeout): + } + + // Send another update to set IgnoreReresolutionRequests to false. 
+ if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": { + Config: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, + IgnoreReresolutionRequests: false, + }, + }, + Priorities: []string{"child-0"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Call ResolveNow() on the CC, it should be forwarded. + balancerCC.ResolveNow(resolver.ResolveNowOptions{}) + select { + case <-cc.ResolveNowCh: + case <-time.After(time.Second): + t.Fatalf("timeout waiting for ResolveNow()") + } + +} + +type wrappedRoundRobinBalancerBuilder struct { + name string + ccCh *testutils.Channel +} + +func (w *wrappedRoundRobinBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + w.ccCh.Send(cc) + rrBuilder := balancer.Get(roundrobin.Name) + return &wrappedRoundRobinBalancer{Balancer: rrBuilder.Build(cc, opts)} +} + +func (w *wrappedRoundRobinBalancerBuilder) Name() string { + return w.name +} + +type wrappedRoundRobinBalancer struct { + balancer.Balancer +} + +// TestPriority_IgnoreReresolutionRequestTwoChildren tests the case where the +// priority policy has two child policies, one of them has the +// IgnoreReresolutionRequests field set to true while the other one has it set +// to false. The test verifies that ResolveNow() calls from the child which is +// set to ignore reresolution requests are ignored, while calls from the other +// child are processed. +func (s) TestPriority_IgnoreReresolutionRequestTwoChildren(t *testing.T) { + // Register a wrapping balancer to act the child policy of the priority + // policy. The wrapping balancer builder's Build() method pushes the + // balancer.ClientConn on a channel for this test to use. 
+ ccCh := testutils.NewChannel() + childPolicyName := t.Name() + balancer.Register(&wrappedRoundRobinBalancerBuilder{name: childPolicyName, ccCh: ccCh}) + + cc := testutils.NewTestClientConn(t) + bb := balancer.Get(Name) + pb := bb.Build(cc, balancer.BuildOptions{}) + defer pb.Close() + + // One children, with priorities [0, 1], each with one backend. + // Reresolution is ignored for p0. + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": { + Config: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, + IgnoreReresolutionRequests: true, + }, + "child-1": { + Config: &internalserviceconfig.BalancerConfig{Name: childPolicyName}, + }, + }, + Priorities: []string{"child-0", "child-1"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Retrieve the ClientConn passed to the child policy from p0. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := ccCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout waiting for ClientConn from the child policy") + } + balancerCC0 := val.(balancer.ClientConn) + + // Set p0 to transient failure, p1 will be started. + addrs0 := <-cc.NewSubConnAddrsCh + if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc0 := <-cc.NewSubConnCh + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + + // Retrieve the ClientConn passed to the child policy from p1. 
+ val, err = ccCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout waiting for ClientConn from the child policy") + } + balancerCC1 := val.(balancer.ClientConn) + + // Since IgnoreReresolutionRequests was set to true for p0, ResolveNow() + // from p0 should all be ignored. + for i := 0; i < 5; i++ { + balancerCC0.ResolveNow(resolver.ResolveNowOptions{}) + } + select { + case <-cc.ResolveNowCh: + t.Fatalf("got unexpected ResolveNow() call") + case <-time.After(defaultTestShortTimeout): + } + + // But IgnoreReresolutionRequests was false for p1, ResolveNow() from p1 + // should be forwarded. + balancerCC1.ResolveNow(resolver.ResolveNowOptions{}) + select { + case <-cc.ResolveNowCh: + case <-time.After(defaultTestShortTimeout): + t.Fatalf("timeout waiting for ResolveNow()") + } +} + +const initIdleBalancerName = "test-init-Idle-balancer" + +var errsTestInitIdle = []error{ + fmt.Errorf("init Idle balancer error 0"), + fmt.Errorf("init Idle balancer error 1"), +} + +func init() { + for i := 0; i < 2; i++ { + ii := i + stub.Register(fmt.Sprintf("%s-%d", initIdleBalancerName, ii), stub.BalancerFuncs{ + UpdateClientConnState: func(bd *stub.BalancerData, opts balancer.ClientConnState) error { + bd.ClientConn.NewSubConn(opts.ResolverState.Addresses, balancer.NewSubConnOptions{}) + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &testutils.TestConstPicker{Err: balancer.ErrNoSubConnAvailable}, + }) + return nil + }, + UpdateSubConnState: func(bd *stub.BalancerData, sc balancer.SubConn, state balancer.SubConnState) { + err := fmt.Errorf("wrong picker error") + if state.ConnectivityState == connectivity.Idle { + err = errsTestInitIdle[ii] + } + bd.ClientConn.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &testutils.TestConstPicker{Err: err}, + }) + }, + }) + } +} + +// If the high priorities send initial pickers with Idle state, their pickers +// should get picks, because policies like 
ringhash starts in Idle, and doesn't +// connect. +// +// Init 0, 1; 0 is Idle, use 0; 0 is down, start 1; 1 is Idle, use 1. +func (s) TestPriority_HighPriorityInitIdle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + cc := testutils.NewTestClientConn(t) + bb := balancer.Get(Name) + pb := bb.Build(cc, balancer.BuildOptions{}) + defer pb.Close() + + // Two children, with priorities [0, 1], each with one backend. + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 0)}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 1)}}, + }, + Priorities: []string{"child-0", "child-1"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + addrs0 := <-cc.NewSubConnAddrsCh + if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc0 := <-cc.NewSubConnCh + + // Send an Idle state update to trigger an Idle picker update. + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + if err := cc.WaitForPickerWithErr(ctx, errsTestInitIdle[0]); err != nil { + t.Fatal(err.Error()) + } + + // Turn p0 down, to start p1. + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs + // will retry. 
+ if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) + } + + addrs1 := <-cc.NewSubConnAddrsCh + if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc1 := <-cc.NewSubConnCh + // Idle picker from p1 should also be forwarded. + pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + if err := cc.WaitForPickerWithErr(ctx, errsTestInitIdle[1]); err != nil { + t.Fatal(err.Error()) + } +} + +// If the high priorities send initial pickers with Idle state, their pickers +// should get picks, because policies like ringhash starts in Idle, and doesn't +// connect. In this case, if a lower priority is added, it shouldn't switch to +// the lower priority. +// +// Init 0; 0 is Idle, use 0; add 1, use 0. +func (s) TestPriority_AddLowPriorityWhenHighIsInIdle(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + cc := testutils.NewTestClientConn(t) + bb := balancer.Get(Name) + pb := bb.Build(cc, balancer.BuildOptions{}) + defer pb.Close() + + // One child, with priorities [0], one backend. 
+ if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 0)}}, + }, + Priorities: []string{"child-0"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + addrs0 := <-cc.NewSubConnAddrsCh + if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc0 := <-cc.NewSubConnCh + + // Send an Idle state update to trigger an Idle picker update. + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + if err := cc.WaitForPickerWithErr(ctx, errsTestInitIdle[0]); err != nil { + t.Fatal(err.Error()) + } + + // Add 1, should keep using 0. + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 0)}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: fmt.Sprintf("%s-%d", initIdleBalancerName, 1)}}, + }, + Priorities: []string{"child-0", "child-1"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // The ClientConn state update triggers a priority switch, from p0 -> p0 + // (since p0 is still in use). 
Along with this the update, p0 also gets a + // ClientConn state update, with the addresses, which didn't change in this + // test (this update to the child is necessary in case the addresses are + // different). + // + // The test child policy, initIdleBalancer, blindly calls NewSubConn with + // all the addresses it receives, so this will trigger a NewSubConn with the + // old p0 addresses. (Note that in a real balancer, like roundrobin, no new + // SubConn will be created because the addresses didn't change). + // + // The check below makes sure that the addresses are still from p0, and not + // from p1. This is good enough for the purpose of this test. + addrsNew := <-cc.NewSubConnAddrsCh + if got, want := addrsNew[0].Addr, testBackendAddrStrs[0]; got != want { + // Fail if p1 is started and creates a SubConn. + t.Fatalf("got unexpected call to NewSubConn with addr: %v, want %v", addrsNew, want) + } +} + +// Lower priority is used when higher priority is not ready; higher priority +// still gets updates. 
+// +// Init 0 and 1; 0 is down, 1 is up, use 1; update 0; 0 is up, use 0 +func (s) TestPriority_HighPriorityUpdatesWhenLowInUse(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + cc := testutils.NewTestClientConn(t) + bb := balancer.Get(Name) + pb := bb.Build(cc, balancer.BuildOptions{}) + defer pb.Close() + + t.Log("Two localities, with priorities [0, 1], each with one backend.") + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[0]}, []string{"child-0"}), + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[1]}, []string{"child-1"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + }, + Priorities: []string{"child-0", "child-1"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + addrs0 := <-cc.NewSubConnAddrsCh + if got, want := addrs0[0].Addr, testBackendAddrStrs[0]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc0 := <-cc.NewSubConnCh + + t.Log("Make p0 fail.") + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + pb.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + + // Before 1 gets READY, picker should return NoSubConnAvailable, so RPCs + // will retry. 
+ if err := cc.WaitForPickerWithErr(ctx, balancer.ErrNoSubConnAvailable); err != nil { + t.Fatal(err.Error()) + } + + t.Log("Make p1 ready.") + addrs1 := <-cc.NewSubConnAddrsCh + if got, want := addrs1[0].Addr, testBackendAddrStrs[1]; got != want { + t.Fatalf("sc is created with addr %v, want %v", got, want) + } + sc1 := <-cc.NewSubConnCh + pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // Test pick with 1. + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) + } + + pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + // Does not change the aggregate state, because round robin does not leave + // TRANIENT_FAILURE if a subconn goes CONNECTING. + pb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + if err := cc.WaitForRoundRobinPicker(ctx, sc1); err != nil { + t.Fatal(err.Error()) + } + + t.Log("Change p0 to use new address.") + if err := pb.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{ + Addresses: []resolver.Address{ + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[2]}, []string{"child-0"}), + hierarchy.Set(resolver.Address{Addr: testBackendAddrStrs[3]}, []string{"child-1"}), + }, + }, + BalancerConfig: &LBConfig{ + Children: map[string]*Child{ + "child-0": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + "child-1": {Config: &internalserviceconfig.BalancerConfig{Name: roundrobin.Name}}, + }, + Priorities: []string{"child-0", "child-1"}, + }, + }); err != nil { + t.Fatalf("failed to update ClientConn state: %v", err) + } + + // Two new subconns are created by the previous update; one by p0 and one + // by p1. They don't happen concurrently, but they could happen in any + // order. 
+ t.Log("Make p0 and p1 both ready; p0 should be used.") + var sc2, sc3 balancer.SubConn + for i := 0; i < 2; i++ { + addr := <-cc.NewSubConnAddrsCh + sc := <-cc.NewSubConnCh + switch addr[0].Addr { + case testBackendAddrStrs[2]: + sc2 = sc + case testBackendAddrStrs[3]: + sc3 = sc + default: + t.Fatalf("sc is created with addr %v, want %v or %v", addr[0].Addr, testBackendAddrStrs[2], testBackendAddrStrs[3]) } + pb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + pb.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + } + if sc2 == nil { + t.Fatalf("sc not created with addr %v", testBackendAddrStrs[2]) + } + if sc3 == nil { + t.Fatalf("sc not created with addr %v", testBackendAddrStrs[3]) + } + + // Test pick with 0. + if err := cc.WaitForRoundRobinPicker(ctx, sc2); err != nil { + t.Fatal(err.Error()) } } diff --git a/xds/internal/balancer/priority/config.go b/xds/internal/balancer/priority/config.go index da085908c71d..37f1c9a829a8 100644 --- a/xds/internal/balancer/priority/config.go +++ b/xds/internal/balancer/priority/config.go @@ -26,24 +26,27 @@ import ( "google.golang.org/grpc/serviceconfig" ) -type child struct { - Config *internalserviceconfig.BalancerConfig +// Child is a child of priority balancer. +type Child struct { + Config *internalserviceconfig.BalancerConfig `json:"config,omitempty"` + IgnoreReresolutionRequests bool `json:"ignoreReresolutionRequests,omitempty"` } -type lbConfig struct { - serviceconfig.LoadBalancingConfig +// LBConfig represents priority balancer's config. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` // Children is a map from the child balancer names to their configs. Child // names can be found in field Priorities. - Children map[string]*child + Children map[string]*Child `json:"children,omitempty"` // Priorities is a list of child balancer names. They are sorted from // highest priority to low. 
The type/config for each child can be found in // field Children, with the balancer name as the key. - Priorities []string + Priorities []string `json:"priorities,omitempty"` } -func parseConfig(c json.RawMessage) (*lbConfig, error) { - var cfg lbConfig +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig if err := json.Unmarshal(c, &cfg); err != nil { return nil, err } diff --git a/xds/internal/balancer/priority/config_test.go b/xds/internal/balancer/priority/config_test.go index 15c4069dd1e7..8316224c91be 100644 --- a/xds/internal/balancer/priority/config_test.go +++ b/xds/internal/balancer/priority/config_test.go @@ -30,7 +30,7 @@ func TestParseConfig(t *testing.T) { tests := []struct { name string js string - want *lbConfig + want *LBConfig wantErr bool }{ { @@ -63,26 +63,27 @@ func TestParseConfig(t *testing.T) { js: `{ "priorities": ["child-1", "child-2", "child-3"], "children": { - "child-1": {"config": [{"round_robin":{}}]}, + "child-1": {"config": [{"round_robin":{}}], "ignoreReresolutionRequests": true}, "child-2": {"config": [{"round_robin":{}}]}, "child-3": {"config": [{"round_robin":{}}]} } } `, - want: &lbConfig{ - Children: map[string]*child{ + want: &LBConfig{ + Children: map[string]*Child{ "child-1": { - &internalserviceconfig.BalancerConfig{ + Config: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, + IgnoreReresolutionRequests: true, }, "child-2": { - &internalserviceconfig.BalancerConfig{ + Config: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, }, "child-3": { - &internalserviceconfig.BalancerConfig{ + Config: &internalserviceconfig.BalancerConfig{ Name: roundrobin.Name, }, }, diff --git a/xds/internal/balancer/priority/ignore_resolve_now.go b/xds/internal/balancer/priority/ignore_resolve_now.go new file mode 100644 index 000000000000..792ee4b3f242 --- /dev/null +++ b/xds/internal/balancer/priority/ignore_resolve_now.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// ignoreResolveNowClientConn wraps a balancer.ClientConn and overrides the +// ResolveNow() method to ignore those calls if the ignoreResolveNow bit is set. +type ignoreResolveNowClientConn struct { + balancer.ClientConn + ignoreResolveNow *uint32 +} + +func newIgnoreResolveNowClientConn(cc balancer.ClientConn, ignore bool) *ignoreResolveNowClientConn { + ret := &ignoreResolveNowClientConn{ + ClientConn: cc, + ignoreResolveNow: new(uint32), + } + ret.updateIgnoreResolveNow(ignore) + return ret +} + +func (i *ignoreResolveNowClientConn) updateIgnoreResolveNow(b bool) { + if b { + atomic.StoreUint32(i.ignoreResolveNow, 1) + return + } + atomic.StoreUint32(i.ignoreResolveNow, 0) + +} + +func (i ignoreResolveNowClientConn) ResolveNow(o resolver.ResolveNowOptions) { + if atomic.LoadUint32(i.ignoreResolveNow) != 0 { + return + } + i.ClientConn.ResolveNow(o) +} diff --git a/xds/internal/balancer/priority/ignore_resolve_now_test.go b/xds/internal/balancer/priority/ignore_resolve_now_test.go new file mode 100644 index 000000000000..5a0083147888 --- /dev/null +++ b/xds/internal/balancer/priority/ignore_resolve_now_test.go @@ -0,0 +1,66 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package priority + +import ( + "context" + "testing" + "time" + + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" +) + +func (s) TestIgnoreResolveNowClientConn(t *testing.T) { + cc := testutils.NewTestClientConn(t) + ignoreCC := newIgnoreResolveNowClientConn(cc, false) + + // Call ResolveNow() on the CC, it should be forwarded. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + ignoreCC.ResolveNow(resolver.ResolveNowOptions{}) + select { + case <-cc.ResolveNowCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for ResolveNow()") + } + + // Update ignoreResolveNow to true, call ResolveNow() on the CC, they should + // all be ignored. + ignoreCC.updateIgnoreResolveNow(true) + for i := 0; i < 5; i++ { + ignoreCC.ResolveNow(resolver.ResolveNowOptions{}) + } + select { + case <-cc.ResolveNowCh: + t.Fatalf("got unexpected ResolveNow() call") + case <-time.After(defaultTestShortTimeout): + } + + // Update ignoreResolveNow to false, new ResolveNow() calls should be + // forwarded. 
+ ignoreCC.updateIgnoreResolveNow(false) + ignoreCC.ResolveNow(resolver.ResolveNowOptions{}) + select { + case <-cc.ResolveNowCh: + case <-ctx.Done(): + t.Fatalf("timeout waiting for ResolveNow()") + } +} diff --git a/xds/internal/balancer/ringhash/config.go b/xds/internal/balancer/ringhash/config.go new file mode 100644 index 000000000000..b4afcf100132 --- /dev/null +++ b/xds/internal/balancer/ringhash/config.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/serviceconfig" +) + +// LBConfig is the balancer config for ring_hash balancer. 
+type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + MinRingSize uint64 `json:"minRingSize,omitempty"` + MaxRingSize uint64 `json:"maxRingSize,omitempty"` +} + +const ( + defaultMinSize = 1024 + defaultMaxSize = 4096 + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M +) + +func parseConfig(c json.RawMessage) (*LBConfig, error) { + var cfg LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + if cfg.MinRingSize > ringHashSizeUpperBound { + return nil, fmt.Errorf("min_ring_size value of %d is greater than max supported value %d for this field", cfg.MinRingSize, ringHashSizeUpperBound) + } + if cfg.MaxRingSize > ringHashSizeUpperBound { + return nil, fmt.Errorf("max_ring_size value of %d is greater than max supported value %d for this field", cfg.MaxRingSize, ringHashSizeUpperBound) + } + if cfg.MinRingSize == 0 { + cfg.MinRingSize = defaultMinSize + } + if cfg.MaxRingSize == 0 { + cfg.MaxRingSize = defaultMaxSize + } + if cfg.MinRingSize > cfg.MaxRingSize { + return nil, fmt.Errorf("min %v is greater than max %v", cfg.MinRingSize, cfg.MaxRingSize) + } + if cfg.MinRingSize > envconfig.RingHashCap { + cfg.MinRingSize = envconfig.RingHashCap + } + if cfg.MaxRingSize > envconfig.RingHashCap { + cfg.MaxRingSize = envconfig.RingHashCap + } + return &cfg, nil +} diff --git a/xds/internal/balancer/ringhash/config_test.go b/xds/internal/balancer/ringhash/config_test.go new file mode 100644 index 000000000000..1077d3e7dafb --- /dev/null +++ b/xds/internal/balancer/ringhash/config_test.go @@ -0,0 +1,115 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/envconfig" +) + +func (s) TestParseConfig(t *testing.T) { + tests := []struct { + name string + js string + envConfigCap uint64 + want *LBConfig + wantErr bool + }{ + { + name: "OK", + js: `{"minRingSize": 1, "maxRingSize": 2}`, + want: &LBConfig{MinRingSize: 1, MaxRingSize: 2}, + }, + { + name: "OK with default min", + js: `{"maxRingSize": 2000}`, + want: &LBConfig{MinRingSize: defaultMinSize, MaxRingSize: 2000}, + }, + { + name: "OK with default max", + js: `{"minRingSize": 2000}`, + want: &LBConfig{MinRingSize: 2000, MaxRingSize: defaultMaxSize}, + }, + { + name: "min greater than max", + js: `{"minRingSize": 10, "maxRingSize": 2}`, + want: nil, + wantErr: true, + }, + { + name: "min greater than max greater than global limit", + js: `{"minRingSize": 6000, "maxRingSize": 5000}`, + want: nil, + wantErr: true, + }, + { + name: "max greater than global limit", + js: `{"minRingSize": 1, "maxRingSize": 6000}`, + want: &LBConfig{MinRingSize: 1, MaxRingSize: 4096}, + }, + { + name: "min and max greater than global limit", + js: `{"minRingSize": 5000, "maxRingSize": 6000}`, + want: &LBConfig{MinRingSize: 4096, MaxRingSize: 4096}, + }, + { + name: "min and max less than raised global limit", + js: `{"minRingSize": 5000, "maxRingSize": 6000}`, + envConfigCap: 8000, + want: &LBConfig{MinRingSize: 5000, MaxRingSize: 6000}, + }, + { + name: "min and max greater than raised global limit", + js: `{"minRingSize": 10000, 
"maxRingSize": 10000}`, + envConfigCap: 8000, + want: &LBConfig{MinRingSize: 8000, MaxRingSize: 8000}, + }, + { + name: "min greater than upper bound", + js: `{"minRingSize": 8388610, "maxRingSize": 10}`, + want: nil, + wantErr: true, + }, + { + name: "max greater than upper bound", + js: `{"minRingSize": 10, "maxRingSize": 8388610}`, + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.envConfigCap != 0 { + old := envconfig.RingHashCap + defer func() { envconfig.RingHashCap = old }() + envconfig.RingHashCap = tt.envConfigCap + } + got, err := parseConfig([]byte(tt.js)) + if (err != nil) != tt.wantErr { + t.Errorf("parseConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(got, tt.want); diff != "" { + t.Errorf("parseConfig() got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} diff --git a/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go new file mode 100644 index 000000000000..4105e3550b7c --- /dev/null +++ b/xds/internal/balancer/ringhash/e2e/ringhash_balancer_test.go @@ -0,0 +1,150 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash_test + +import ( + "context" + "testing" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" + + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" + + _ "google.golang.org/grpc/xds/internal/balancer/ringhash" // Register the ring_hash_experimental LB policy. +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + defaultTestTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. +) + +type testService struct { + testgrpc.TestServiceServer +} + +func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { + return &testpb.Empty{}, nil +} + +// TestRingHash_ReconnectToMoveOutOfTransientFailure tests the case where the +// ring contains a single subConn, and verifies that when the server goes down, +// the LB policy on the client automatically reconnects until the subChannel +// moves out of TRANSIENT_FAILURE. +func (s) TestRingHash_ReconnectToMoveOutOfTransientFailure(t *testing.T) { + // Create a restartable listener to simulate server being down. + l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + lis := testutils.NewRestartableListener(l) + + // Start a server backend exposing the test service. 
+ server := grpc.NewServer() + defer server.Stop() + testgrpc.RegisterTestServiceServer(server, &testService{}) + go func() { + if err := server.Serve(lis); err != nil { + t.Errorf("Serve() failed: %v", err) + } + }() + + // Create a clientConn with a manual resolver (which is used to push the + // address of the test backend), and a default service config pointing to + // the use of the ring_hash_experimental LB policy. + const ringHashServiceConfig = `{"loadBalancingConfig": [{"ring_hash_experimental":{}}]}` + r := manual.NewBuilderWithScheme("whatever") + dopts := []grpc.DialOption{ + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithResolvers(r), + grpc.WithDefaultServiceConfig(ringHashServiceConfig), + } + cc, err := grpc.Dial(r.Scheme()+":///test.server", dopts...) + if err != nil { + t.Fatalf("failed to dial local test server: %v", err) + } + defer cc.Close() + + // Push the address of the test backend through the manual resolver. + r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: lis.Addr().String()}}}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + client := testgrpc.NewTestServiceClient(cc) + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } + + // Stopping the server listener will close the transport on the client, + // which will lead to the channel eventually moving to IDLE. The ring_hash + // LB policy is not expected to reconnect by itself at this point. + lis.Stop() + for state := cc.GetState(); state != connectivity.Idle && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + } + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout waiting for channel to reach %q after server shutdown: %v", connectivity.Idle, err) + } + + // Make an RPC to get the ring_hash LB policy to reconnect and thereby move + // to TRANSIENT_FAILURE upon connection failure. 
+ client.EmptyCall(ctx, &testpb.Empty{}) + for state := cc.GetState(); state != connectivity.TransientFailure && cc.WaitForStateChange(ctx, state); state = cc.GetState() { + } + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout waiting for channel to reach %q after server shutdown: %v", connectivity.TransientFailure, err) + } + + // An RPC at this point is expected to fail. + if _, err = client.EmptyCall(ctx, &testpb.Empty{}); err == nil { + t.Fatal("EmptyCall RPC succeeded when the channel is in TRANSIENT_FAILURE") + } + + // Restart the server listener. The ring_hash LB policy is expected to + // attempt to reconnect on its own and come out of TRANSIENT_FAILURE, even + // without an RPC attempt. + lis.Restart() + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if cc.GetState() == connectivity.Ready { + break + } + } + if err := ctx.Err(); err != nil { + t.Fatalf("Timeout waiting for channel to reach READY after server restart: %v", err) + } + + // An RPC at this point is expected to succeed. + if _, err := client.EmptyCall(ctx, &testpb.Empty{}); err != nil { + t.Fatalf("rpc EmptyCall() failed: %v", err) + } +} diff --git a/xds/internal/balancer/ringhash/logging.go b/xds/internal/balancer/ringhash/logging.go new file mode 100644 index 000000000000..3e0f0adf58eb --- /dev/null +++ b/xds/internal/balancer/ringhash/logging.go @@ -0,0 +1,38 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[ring-hash-lb %p] " + +var logger = grpclog.Component("xds") + +func prefixLogger(p *ringhashBalancer) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) +} + +func subConnPrefixLogger(p *ringhashBalancer, sc *subConn) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)+fmt.Sprintf("[subConn %p] ", sc)) +} diff --git a/xds/internal/balancer/ringhash/picker.go b/xds/internal/balancer/ringhash/picker.go new file mode 100644 index 000000000000..b450716fa0f0 --- /dev/null +++ b/xds/internal/balancer/ringhash/picker.go @@ -0,0 +1,186 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash + +import ( + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/status" +) + +type picker struct { + ring *ring + logger *grpclog.PrefixLogger + subConnStates map[*subConn]connectivity.State +} + +func newPicker(ring *ring, logger *grpclog.PrefixLogger) *picker { + states := make(map[*subConn]connectivity.State) + for _, e := range ring.items { + states[e.sc] = e.sc.effectiveState() + } + return &picker{ring: ring, logger: logger, subConnStates: states} +} + +// handleRICSResult is the return type of handleRICS. It's needed to wrap the +// returned error from Pick() in a struct. With this, if the return values are +// `balancer.PickResult, error, bool`, linter complains because error is not the +// last return value. +type handleRICSResult struct { + pr balancer.PickResult + err error +} + +// handleRICS generates pick result if the entry is in Ready, Idle, Connecting +// or Shutdown. TransientFailure will be handled specifically after this +// function returns. +// +// The first return value indicates if the state is in Ready, Idle, Connecting +// or Shutdown. If it's true, the PickResult and error should be returned from +// Pick() as is. +func (p *picker) handleRICS(e *ringEntry) (handleRICSResult, bool) { + switch state := p.subConnStates[e.sc]; state { + case connectivity.Ready: + return handleRICSResult{pr: balancer.PickResult{SubConn: e.sc.sc}}, true + case connectivity.Idle: + // Trigger Connect() and queue the pick. + e.sc.queueConnect() + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + case connectivity.Connecting: + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + case connectivity.TransientFailure: + // Return ok==false, so TransientFailure will be handled afterwards. 
+ return handleRICSResult{}, false + case connectivity.Shutdown: + // Shutdown can happen in a race where the old picker is called. A new + // picker should already be sent. + return handleRICSResult{err: balancer.ErrNoSubConnAvailable}, true + default: + // Should never reach this. All the connectivity states are already + // handled in the cases. + p.logger.Errorf("SubConn has undefined connectivity state: %v", state) + return handleRICSResult{err: status.Errorf(codes.Unavailable, "SubConn has undefined connectivity state: %v", state)}, true + } +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + e := p.ring.pick(getRequestHash(info.Ctx)) + if hr, ok := p.handleRICS(e); ok { + return hr.pr, hr.err + } + // ok was false, the entry is in transient failure. + return p.handleTransientFailure(e) +} + +func (p *picker) handleTransientFailure(e *ringEntry) (balancer.PickResult, error) { + // Queue a connect on the first picked SubConn. + e.sc.queueConnect() + + // Find next entry in the ring, skipping duplicate SubConns. + e2 := nextSkippingDuplicates(p.ring, e) + if e2 == nil { + // There's no next entry available, fail the pick. + return balancer.PickResult{}, fmt.Errorf("the only SubConn is in Transient Failure") + } + + // For the second SubConn, also check Ready/Idle/Connecting as if it's the + // first entry. + if hr, ok := p.handleRICS(e2); ok { + return hr.pr, hr.err + } + + // The second SubConn is also in TransientFailure. Queue a connect on it. + e2.sc.queueConnect() + + // If it gets here, this is after the second SubConn, and the second SubConn + // was in TransientFailure. + // + // Loop over all other SubConns: + // - If all SubConns so far are all TransientFailure, trigger Connect() on + // the TransientFailure SubConns, and keep going. 
+ // - If there's one SubConn that's not in TransientFailure, keep checking + // the remaining SubConns (in case there's a Ready, which will be returned), + // but don't trigger Connect() on the other SubConns. + var firstNonFailedFound bool + for ee := nextSkippingDuplicates(p.ring, e2); ee != e; ee = nextSkippingDuplicates(p.ring, ee) { + scState := p.subConnStates[ee.sc] + if scState == connectivity.Ready { + return balancer.PickResult{SubConn: ee.sc.sc}, nil + } + if firstNonFailedFound { + continue + } + if scState == connectivity.TransientFailure { + // This will queue a connect. + ee.sc.queueConnect() + continue + } + // This is a SubConn in a non-failure state. We continue to check the + // other SubConns, but remember that there was a non-failed SubConn + // seen. After this, Pick() will never trigger any SubConn to Connect(). + firstNonFailedFound = true + if scState == connectivity.Idle { + // This is the first non-failed SubConn, and it is in a real Idle + // state. Trigger it to Connect(). + ee.sc.queueConnect() + } + } + return balancer.PickResult{}, fmt.Errorf("no connection is Ready") +} + +// nextSkippingDuplicates finds the next entry in the ring, with a different +// subconn from the given entry. +func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { + for next := ring.next(entry); next != entry; next = ring.next(next) { + if next.sc != entry.sc { + return next + } + } + // There's no qualifying next entry. + return nil +} + +// nextSkippingDuplicatesSubConn finds the next subconn in the ring, that's +// different from the given subconn. +func nextSkippingDuplicatesSubConn(ring *ring, sc *subConn) *subConn { + var entry *ringEntry + for _, it := range ring.items { + if it.sc == sc { + entry = it + break + } + } + if entry == nil { + // If the given subconn is not in the ring (e.g. it was deleted), return + // the first one. 
+ if len(ring.items) > 0 { + return ring.items[0].sc + } + return nil + } + ee := nextSkippingDuplicates(ring, entry) + if ee == nil { + return nil + } + return ee.sc +} diff --git a/xds/internal/balancer/ringhash/picker_test.go b/xds/internal/balancer/ringhash/picker_test.go new file mode 100644 index 000000000000..7accb1b4c00f --- /dev/null +++ b/xds/internal/balancer/ringhash/picker_test.go @@ -0,0 +1,287 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + igrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/testutils" +) + +func newTestRing(cStats []connectivity.State) *ring { + var items []*ringEntry + for i, st := range cStats { + testSC := testutils.TestSubConns[i] + items = append(items, &ringEntry{ + idx: i, + hash: uint64((i + 1) * 10), + sc: &subConn{ + addr: testSC.String(), + sc: testSC, + state: st, + }, + }) + } + return &ring{items: items} +} + +func (s) TestPickerPickFirstTwo(t *testing.T) { + tests := []struct { + name string + ring *ring + hash uint64 + wantSC balancer.SubConn + wantErr error + wantSCToConnect balancer.SubConn + }{ + { + name: "picked is Ready", + ring: newTestRing([]connectivity.State{connectivity.Ready, connectivity.Idle}), + hash: 5, + wantSC: testutils.TestSubConns[0], + }, + { + name: "picked is connecting, queue", + ring: newTestRing([]connectivity.State{connectivity.Connecting, connectivity.Idle}), + hash: 5, + wantErr: balancer.ErrNoSubConnAvailable, + }, + { + name: "picked is Idle, connect and queue", + ring: newTestRing([]connectivity.State{connectivity.Idle, connectivity.Idle}), + hash: 5, + wantErr: balancer.ErrNoSubConnAvailable, + wantSCToConnect: testutils.TestSubConns[0], + }, + { + name: "picked is TransientFailure, next is ready, return", + ring: newTestRing([]connectivity.State{connectivity.TransientFailure, connectivity.Ready}), + hash: 5, + wantSC: testutils.TestSubConns[1], + }, + { + name: "picked is TransientFailure, next is connecting, queue", + ring: newTestRing([]connectivity.State{connectivity.TransientFailure, connectivity.Connecting}), + hash: 5, + wantErr: balancer.ErrNoSubConnAvailable, + }, + { + name: "picked is TransientFailure, next is Idle, connect and queue", + ring: 
newTestRing([]connectivity.State{connectivity.TransientFailure, connectivity.Idle}), + hash: 5, + wantErr: balancer.ErrNoSubConnAvailable, + wantSCToConnect: testutils.TestSubConns[1], + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := newPicker(tt.ring, igrpclog.NewPrefixLogger(grpclog.Component("xds"), "rh_test")) + got, err := p.Pick(balancer.PickInfo{ + Ctx: SetRequestHash(context.Background(), tt.hash), + }) + if err != tt.wantErr { + t.Errorf("Pick() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !cmp.Equal(got, balancer.PickResult{SubConn: tt.wantSC}, cmpOpts) { + t.Errorf("Pick() got = %v, want picked SubConn: %v", got, tt.wantSC) + } + if sc := tt.wantSCToConnect; sc != nil { + select { + case <-sc.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestShortTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc) + } + } + }) + } +} + +// TestPickerPickTriggerTFConnect covers that if the picked SubConn is +// TransientFailures, all SubConns until a non-TransientFailure are queued for +// Connect(). +func (s) TestPickerPickTriggerTFConnect(t *testing.T) { + ring := newTestRing([]connectivity.State{ + connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, + connectivity.Idle, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, + }) + p := newPicker(ring, igrpclog.NewPrefixLogger(grpclog.Component("xds"), "rh_test")) + _, err := p.Pick(balancer.PickInfo{Ctx: SetRequestHash(context.Background(), 5)}) + if err == nil { + t.Fatalf("Pick() error = %v, want non-nil", err) + } + // The first 4 SubConns, all in TransientFailure, should be queued to + // connect. 
+ for i := 0; i < 4; i++ { + it := ring.items[i] + if !it.sc.connectQueued { + t.Errorf("the %d-th SubConn is not queued for connect", i) + } + } + // The other SubConns, after the first Idle, should not be queued to + // connect. + for i := 5; i < len(ring.items); i++ { + it := ring.items[i] + if it.sc.connectQueued { + t.Errorf("the %d-th SubConn is unexpectedly queued for connect", i) + } + } +} + +// TestPickerPickTriggerTFReturnReady covers that if the picked SubConn is +// TransientFailure, SubConn 2 and 3 are TransientFailure, 4 is Ready. SubConn 2 +// and 3 will Connect(), and 4 will be returned. +func (s) TestPickerPickTriggerTFReturnReady(t *testing.T) { + ring := newTestRing([]connectivity.State{ + connectivity.TransientFailure, connectivity.TransientFailure, connectivity.TransientFailure, connectivity.Ready, + }) + p := newPicker(ring, igrpclog.NewPrefixLogger(grpclog.Component("xds"), "rh_test")) + pr, err := p.Pick(balancer.PickInfo{Ctx: SetRequestHash(context.Background(), 5)}) + if err != nil { + t.Fatalf("Pick() error = %v, want nil", err) + } + if wantSC := testutils.TestSubConns[3]; pr.SubConn != wantSC { + t.Fatalf("Pick() = %v, want %v", pr.SubConn, wantSC) + } + // The first 3 SubConns, all in TransientFailure, should be queued to + // connect. + for i := 0; i < 3; i++ { + it := ring.items[i] + if !it.sc.connectQueued { + t.Errorf("the %d-th SubConn is not queued for connect", i) + } + } +} + +// TestPickerPickTriggerTFWithIdle covers that if the picked SubConn is +// TransientFailure, SubConn 2 is TransientFailure, 3 is Idle (init Idle). Pick +// will be queued, SubConn 3 will Connect(), SubConn 4 and 5 (in TransientFailure) +// will not queue a Connect. 
+func (s) TestPickerPickTriggerTFWithIdle(t *testing.T) { + ring := newTestRing([]connectivity.State{ + connectivity.TransientFailure, connectivity.TransientFailure, connectivity.Idle, connectivity.TransientFailure, connectivity.TransientFailure, + }) + p := newPicker(ring, igrpclog.NewPrefixLogger(grpclog.Component("xds"), "rh_test")) + _, err := p.Pick(balancer.PickInfo{Ctx: SetRequestHash(context.Background(), 5)}) + if err == balancer.ErrNoSubConnAvailable { + t.Fatalf("Pick() error = %v, want %v", err, balancer.ErrNoSubConnAvailable) + } + // The first 2 SubConns, all in TransientFailure, should be queued to + // connect. + for i := 0; i < 2; i++ { + it := ring.items[i] + if !it.sc.connectQueued { + t.Errorf("the %d-th SubConn is not queued for connect", i) + } + } + // SubConn 3 was in Idle, so should Connect() + select { + case <-testutils.TestSubConns[2].ConnectCh: + case <-time.After(defaultTestShortTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", testutils.TestSubConns[2]) + } + // The other SubConns, after the first Idle, should not be queued to + // connect. 
+ for i := 3; i < len(ring.items); i++ { + it := ring.items[i] + if it.sc.connectQueued { + t.Errorf("the %d-th SubConn is unexpected queued for connect", i) + } + } +} + +func (s) TestNextSkippingDuplicatesNoDup(t *testing.T) { + testRing := newTestRing([]connectivity.State{connectivity.Idle, connectivity.Idle}) + tests := []struct { + name string + ring *ring + cur *ringEntry + want *ringEntry + }{ + { + name: "no dup", + ring: testRing, + cur: testRing.items[0], + want: testRing.items[1], + }, + { + name: "only one entry", + ring: &ring{items: []*ringEntry{testRing.items[0]}}, + cur: testRing.items[0], + want: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := nextSkippingDuplicates(tt.ring, tt.cur); !cmp.Equal(got, tt.want, cmpOpts) { + t.Errorf("nextSkippingDuplicates() = %v, want %v", got, tt.want) + } + }) + } +} + +// addDups adds duplicates of items[0] to the ring. +func addDups(r *ring, count int) *ring { + var ( + items []*ringEntry + idx int + ) + for i, it := range r.items { + itt := *it + itt.idx = idx + items = append(items, &itt) + idx++ + if i == 0 { + // Add duplicate of items[0] to the ring + for j := 0; j < count; j++ { + itt2 := *it + itt2.idx = idx + items = append(items, &itt2) + idx++ + } + } + } + return &ring{items: items} +} + +func (s) TestNextSkippingDuplicatesMoreDup(t *testing.T) { + testRing := newTestRing([]connectivity.State{connectivity.Idle, connectivity.Idle}) + // Make a new ring with duplicate SubConns. + dupTestRing := addDups(testRing, 3) + if got := nextSkippingDuplicates(dupTestRing, dupTestRing.items[0]); !cmp.Equal(got, dupTestRing.items[len(dupTestRing.items)-1], cmpOpts) { + t.Errorf("nextSkippingDuplicates() = %v, want %v", got, dupTestRing.items[len(dupTestRing.items)-1]) + } +} + +func (s) TestNextSkippingDuplicatesOnlyDup(t *testing.T) { + testRing := newTestRing([]connectivity.State{connectivity.Idle}) + // Make a new ring with only duplicate SubConns. 
+ dupTestRing := addDups(testRing, 3) + // This ring only has duplicates of items[0], should return nil. + if got := nextSkippingDuplicates(dupTestRing, dupTestRing.items[0]); got != nil { + t.Errorf("nextSkippingDuplicates() = %v, want nil", got) + } +} diff --git a/xds/internal/balancer/ringhash/ring.go b/xds/internal/balancer/ringhash/ring.go new file mode 100644 index 000000000000..4d7fdb35e722 --- /dev/null +++ b/xds/internal/balancer/ringhash/ring.go @@ -0,0 +1,169 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import ( + "math" + "sort" + "strconv" + + xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/resolver" +) + +type ring struct { + items []*ringEntry +} + +type subConnWithWeight struct { + sc *subConn + weight float64 +} + +type ringEntry struct { + idx int + hash uint64 + sc *subConn +} + +// newRing creates a ring from the subConns stored in the AddressMap. The ring +// size is limited by the passed in max/min. +// +// ring entries will be created for each subConn, and subConn with high weight +// (specified by the address) may have multiple entries. 
+// +// For example, for subConns with weights {a:3, b:3, c:4}, a generated ring of +// size 10 could be: +// - {idx:0 hash:3689675255460411075 b} +// - {idx:1 hash:4262906501694543955 c} +// - {idx:2 hash:5712155492001633497 c} +// - {idx:3 hash:8050519350657643659 b} +// - {idx:4 hash:8723022065838381142 b} +// - {idx:5 hash:11532782514799973195 a} +// - {idx:6 hash:13157034721563383607 c} +// - {idx:7 hash:14468677667651225770 c} +// - {idx:8 hash:17336016884672388720 a} +// - {idx:9 hash:18151002094784932496 a} +// +// To pick from a ring, a binary search will be done for the given target hash, +// and first item with hash >= given hash will be returned. +// +// Must be called with a non-empty subConns map. +func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, logger *grpclog.PrefixLogger) *ring { + logger.Debugf("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize) + + // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 + normalizedWeights, minWeight := normalizeWeights(subConns) + logger.Debugf("newRing: normalized subConn weights is %v", normalizedWeights) + + // Normalized weights for {3,3,4} is {0.3,0.3,0.4}. + + // Scale up the size of the ring such that the least-weighted host gets a + // whole number of hashes on the ring. + // + // Note that size is limited by the input max/min. + scale := math.Min(math.Ceil(minWeight*float64(minRingSize))/minWeight, float64(maxRingSize)) + ringSize := math.Ceil(scale) + items := make([]*ringEntry, 0, int(ringSize)) + logger.Debugf("newRing: creating new ring of size %v", ringSize) + + // For each entry, scale*weight nodes are generated in the ring. + // + // Not all of these are whole numbers. E.g. for weights {a:3,b:3,c:4}, if + // ring size is 7, scale is 6.66. The numbers of nodes will be + // {a,a,b,b,c,c,c}. 
+ // + // A hash is generated for each item, and later the results will be sorted + // based on the hash. + var currentHashes, targetHashes float64 + for _, scw := range normalizedWeights { + targetHashes += scale * scw.weight + // This index ensures that ring entries corresponding to the same + // address hash to different values. And since this index is + // per-address, these entries hash to the same value across address + // updates. + idx := 0 + for currentHashes < targetHashes { + h := xxhash.Sum64String(scw.sc.addr + "_" + strconv.Itoa(idx)) + items = append(items, &ringEntry{hash: h, sc: scw.sc}) + idx++ + currentHashes++ + } + } + + // Sort items based on hash, to prepare for binary search. + sort.Slice(items, func(i, j int) bool { return items[i].hash < items[j].hash }) + for i, ii := range items { + ii.idx = i + } + return &ring{items: items} +} + +// normalizeWeights divides all the weights by the sum, so that the total weight +// is 1. +// +// Must be called with a non-empty subConns map. +func normalizeWeights(subConns *resolver.AddressMap) ([]subConnWithWeight, float64) { + var weightSum uint32 + keys := subConns.Keys() + for _, a := range keys { + weightSum += getWeightAttribute(a) + } + ret := make([]subConnWithWeight, 0, len(keys)) + min := float64(1.0) + for _, a := range keys { + v, _ := subConns.Get(a) + scInfo := v.(*subConn) + // getWeightAttribute() returns 1 if the weight attribute is not found + // on the address. And since this function is guaranteed to be called + // with a non-empty subConns map, weightSum is guaranteed to be + // non-zero. So, we need not worry about divide a by zero error here. + nw := float64(getWeightAttribute(a)) / float64(weightSum) + ret = append(ret, subConnWithWeight{sc: scInfo, weight: nw}) + if nw < min { + min = nw + } + } + // Sort the addresses to return consistent results. 
+ // + // Note: this might not be necessary, but this makes sure the ring is + // consistent as long as the addresses are the same, for example, in cases + // where an address is added and then removed, the RPCs will still pick the + // same old SubConn. + sort.Slice(ret, func(i, j int) bool { return ret[i].sc.addr < ret[j].sc.addr }) + return ret, min +} + +// pick does a binary search. It returns the item with smallest index i that +// r.items[i].hash >= h. +func (r *ring) pick(h uint64) *ringEntry { + i := sort.Search(len(r.items), func(i int) bool { return r.items[i].hash >= h }) + if i == len(r.items) { + // If not found, and h is greater than the largest hash, return the + // first item. + i = 0 + } + return r.items[i] +} + +// next returns the next entry. +func (r *ring) next(e *ringEntry) *ringEntry { + return r.items[(e.idx+1)%len(r.items)] +} diff --git a/xds/internal/balancer/ringhash/ring_test.go b/xds/internal/balancer/ringhash/ring_test.go new file mode 100644 index 000000000000..9c6eb0c242ff --- /dev/null +++ b/xds/internal/balancer/ringhash/ring_test.go @@ -0,0 +1,111 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash + +import ( + "fmt" + "math" + "testing" + + xxhash "github.com/cespare/xxhash/v2" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/resolver" +) + +var testAddrs []resolver.Address +var testSubConnMap *resolver.AddressMap + +func init() { + testAddrs = []resolver.Address{ + testAddr("a", 3), + testAddr("b", 3), + testAddr("c", 4), + } + testSubConnMap = resolver.NewAddressMap() + testSubConnMap.Set(testAddrs[0], &subConn{addr: "a"}) + testSubConnMap.Set(testAddrs[1], &subConn{addr: "b"}) + testSubConnMap.Set(testAddrs[2], &subConn{addr: "c"}) +} + +func testAddr(addr string, weight uint32) resolver.Address { + return weightedroundrobin.SetAddrInfo(resolver.Address{Addr: addr}, weightedroundrobin.AddrInfo{Weight: weight}) +} + +func (s) TestRingNew(t *testing.T) { + var totalWeight float64 = 10 + for _, min := range []uint64{3, 4, 6, 8} { + for _, max := range []uint64{20, 8} { + t.Run(fmt.Sprintf("size-min-%v-max-%v", min, max), func(t *testing.T) { + r := newRing(testSubConnMap, min, max, nil) + totalCount := len(r.items) + if totalCount < int(min) || totalCount > int(max) { + t.Fatalf("unexpect size %v, want min %v, max %v", totalCount, min, max) + } + for _, a := range testAddrs { + var count int + for _, ii := range r.items { + if ii.sc.addr == a.Addr { + count++ + } + } + got := float64(count) / float64(totalCount) + want := float64(getWeightAttribute(a)) / totalWeight + if !equalApproximately(got, want) { + t.Fatalf("unexpected item weight in ring: %v != %v", got, want) + } + } + }) + } + } +} + +func equalApproximately(x, y float64) bool { + delta := math.Abs(x - y) + mean := math.Abs(x+y) / 2.0 + return delta/mean < 0.25 +} + +func (s) TestRingPick(t *testing.T) { + r := newRing(testSubConnMap, 10, 20, nil) + for _, h := range []uint64{xxhash.Sum64String("1"), xxhash.Sum64String("2"), xxhash.Sum64String("3"), xxhash.Sum64String("4")} { + t.Run(fmt.Sprintf("picking-hash-%v", h), 
func(t *testing.T) { + e := r.pick(h) + var low uint64 + if e.idx > 0 { + low = r.items[e.idx-1].hash + } + high := e.hash + // h should be in [low, high). + if h < low || h >= high { + t.Fatalf("unexpected item picked, hash: %v, low: %v, high: %v", h, low, high) + } + }) + } +} + +func (s) TestRingNext(t *testing.T) { + r := newRing(testSubConnMap, 10, 20, nil) + + for _, e := range r.items { + ne := r.next(e) + if ne.idx != (e.idx+1)%len(r.items) { + t.Fatalf("next(%+v) returned unexpected %+v", e, ne) + } + } +} diff --git a/xds/internal/balancer/ringhash/ringhash.go b/xds/internal/balancer/ringhash/ringhash.go new file mode 100644 index 000000000000..005efd1c581c --- /dev/null +++ b/xds/internal/balancer/ringhash/ringhash.go @@ -0,0 +1,509 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package ringhash implements the ringhash balancer. +package ringhash + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// Name is the name of the ring_hash balancer. 
+const Name = "ring_hash_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + b := &ringhashBalancer{ + cc: cc, + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]*subConn), + csEvltr: &connectivityStateEvaluator{}, + } + b.logger = prefixLogger(b) + b.logger.Infof("Created") + return b +} + +func (bb) Name() string { + return Name +} + +func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return parseConfig(c) +} + +type subConn struct { + addr string + weight uint32 + sc balancer.SubConn + logger *grpclog.PrefixLogger + + mu sync.RWMutex + // This is the actual state of this SubConn (as updated by the ClientConn). + // The effective state can be different, see comment of attemptedToConnect. + state connectivity.State + // failing is whether this SubConn is in a failing state. A subConn is + // considered to be in a failing state if it was previously in + // TransientFailure. + // + // This affects the effective connectivity state of this SubConn, e.g. + // - if the actual state is Idle or Connecting, but this SubConn is failing, + // the effective state is TransientFailure. + // + // This is used in pick(). E.g. if a subConn is Idle, but has failing as + // true, pick() will + // - consider this SubConn as TransientFailure, and check the state of the + // next SubConn. + // - trigger Connect() (note that normally a SubConn in real + // TransientFailure cannot Connect()) + // + // A subConn starts in non-failing (failing is false). A transition to + // TransientFailure sets failing to true (and it stays true). A transition + // to Ready sets failing to false. + failing bool + // connectQueued is true if a Connect() was queued for this SubConn while + // it's not in Idle (most likely was in TransientFailure). A Connect() will + // be triggered on this SubConn when it turns Idle. 
+ // + // When connectivity state is updated to Idle for this SubConn, if + // connectQueued is true, Connect() will be called on the SubConn. + connectQueued bool + // attemptingToConnect indicates if this subconn is attempting to connect. + // It's set when queueConnect is called. It's unset when the state is + // changed to Ready/Shutdown, or Idle (and if connectQueued is false). + attemptingToConnect bool +} + +// setState updates the state of this SubConn. +// +// It also handles the queued Connect(). If the new state is Idle, and a +// Connect() was queued, this SubConn will be triggered to Connect(). +func (sc *subConn) setState(s connectivity.State) { + sc.mu.Lock() + defer sc.mu.Unlock() + switch s { + case connectivity.Idle: + // Trigger Connect() if new state is Idle, and there is a queued connect. + if sc.connectQueued { + sc.connectQueued = false + sc.logger.Infof("Executing a queued connect for subConn moving to state: %v", sc.state) + sc.sc.Connect() + } else { + sc.attemptingToConnect = false + } + case connectivity.Connecting: + // Clear connectQueued if the SubConn isn't failing. This state + // transition is unlikely to happen, but handle this just in case. + sc.connectQueued = false + case connectivity.Ready: + // Clear connectQueued if the SubConn isn't failing. This state + // transition is unlikely to happen, but handle this just in case. + sc.connectQueued = false + sc.attemptingToConnect = false + // Set to a non-failing state. + sc.failing = false + case connectivity.TransientFailure: + // Set to a failing state. + sc.failing = true + case connectivity.Shutdown: + sc.attemptingToConnect = false + } + sc.state = s +} + +// effectiveState returns the effective state of this SubConn. It can be +// different from the actual state, e.g. Idle while the subConn is failing is +// considered TransientFailure. Read comment of field failing for other cases. 
+func (sc *subConn) effectiveState() connectivity.State { + sc.mu.RLock() + defer sc.mu.RUnlock() + if sc.failing && (sc.state == connectivity.Idle || sc.state == connectivity.Connecting) { + return connectivity.TransientFailure + } + return sc.state +} + +// queueConnect sets a boolean so that when the SubConn state changes to Idle, +// it's Connect() will be triggered. If the SubConn state is already Idle, it +// will just call Connect(). +func (sc *subConn) queueConnect() { + sc.mu.Lock() + defer sc.mu.Unlock() + sc.attemptingToConnect = true + if sc.state == connectivity.Idle { + sc.logger.Infof("Executing a queued connect for subConn in state: %v", sc.state) + sc.sc.Connect() + return + } + // Queue this connect, and when this SubConn switches back to Idle (happens + // after backoff in TransientFailure), it will Connect(). + sc.logger.Infof("Queueing a connect for subConn in state: %v", sc.state) + sc.connectQueued = true +} + +func (sc *subConn) isAttemptingToConnect() bool { + sc.mu.Lock() + defer sc.mu.Unlock() + return sc.attemptingToConnect +} + +type ringhashBalancer struct { + cc balancer.ClientConn + logger *grpclog.PrefixLogger + + config *LBConfig + subConns *resolver.AddressMap // Map from resolver.Address to `*subConn`. + scStates map[balancer.SubConn]*subConn + + // ring is always in sync with subConns. When subConns change, a new ring is + // generated. Note that address weights updates (they are keys in the + // subConns map) also regenerates the ring. + ring *ring + picker balancer.Picker + csEvltr *connectivityStateEvaluator + state connectivity.State + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +// updateAddresses creates new SubConns and removes SubConns, based on the +// address update. +// +// The return value is whether the new address list is different from the +// previous. 
True if +// - an address was added +// - an address was removed +// - an address's weight was updated +// +// Note that this function doesn't trigger SubConn connecting, so all the new +// SubConn states are Idle. +func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { + var addrsUpdated bool + // addrsSet is the set converted from addrs, used for quick lookup. + addrsSet := resolver.NewAddressMap() + for _, addr := range addrs { + addrsSet.Set(addr, true) + newWeight := getWeightAttribute(addr) + if val, ok := b.subConns.Get(addr); !ok { + sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{HealthCheckEnabled: true}) + if err != nil { + b.logger.Warningf("Failed to create new SubConn: %v", err) + continue + } + scs := &subConn{addr: addr.Addr, weight: newWeight, sc: sc} + scs.logger = subConnPrefixLogger(b, scs) + scs.setState(connectivity.Idle) + b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle) + b.subConns.Set(addr, scs) + b.scStates[sc] = scs + addrsUpdated = true + } else { + // We have seen this address before and created a subConn for it. If the + // weight associated with the address has changed, update the subConns map + // with the new weight. This will be used when a new ring is created. + // + // There is no need to call UpdateAddresses on the subConn at this point + // since *only* the weight attribute has changed, and that does not affect + // subConn uniqueness. + scInfo := val.(*subConn) + if oldWeight := scInfo.weight; oldWeight != newWeight { + scInfo.weight = newWeight + b.subConns.Set(addr, scInfo) + // Return true to force recreation of the ring. + addrsUpdated = true + } + } + } + for _, addr := range b.subConns.Keys() { + // addr was removed by resolver. 
+ if _, ok := addrsSet.Get(addr); !ok { + v, _ := b.subConns.Get(addr) + scInfo := v.(*subConn) + b.cc.RemoveSubConn(scInfo.sc) + b.subConns.Delete(addr) + addrsUpdated = true + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in UpdateSubConnState. + } + } + return addrsUpdated +} + +func (b *ringhashBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + b.logger.Infof("Received update from resolver, balancer config: %+v", pretty.ToJSON(s.BalancerConfig)) + newConfig, ok := s.BalancerConfig.(*LBConfig) + if !ok { + return fmt.Errorf("unexpected balancer config with type: %T", s.BalancerConfig) + } + + // If addresses were updated, whether it resulted in SubConn + // creation/deletion, or just weight update, we need to regenerate the ring + // and send a new picker. + regenerateRing := b.updateAddresses(s.ResolverState.Addresses) + + // If the ring configuration has changed, we need to regenerate the ring and + // send a new picker. + if b.config == nil || b.config.MinRingSize != newConfig.MinRingSize || b.config.MaxRingSize != newConfig.MaxRingSize { + regenerateRing = true + } + b.config = newConfig + + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + + if regenerateRing { + // Ring creation is guaranteed to not fail because we call newRing() + // with a non-empty subConns map. + b.ring = newRing(b.subConns, b.config.MinRingSize, b.config.MaxRingSize, b.logger) + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + } + + // Successful resolution; clear resolver error and return nil. 
+ b.resolverErr = nil + return nil +} + +func (b *ringhashBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +// UpdateSubConnState updates the per-SubConn state stored in the ring, and also +// the aggregated state. +// +// It triggers an update to cc when: +// - the new state is TransientFailure, to update the error message +// - it's possible that this is a noop, but sending an extra update is easier +// than comparing errors +// +// - the aggregated state is changed +// - the same picker will be sent again, but this update may trigger a re-pick +// for some RPCs. +func (b *ringhashBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + b.logger.Infof("Handle SubConn state change: %p, %v", sc, s) + } + scs, ok := b.scStates[sc] + if !ok { + b.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s) + return + } + oldSCState := scs.effectiveState() + scs.setState(s) + newSCState := scs.effectiveState() + b.logger.Infof("SubConn's effective old state was: %v, new state is %v", oldSCState, newSCState) + + b.state = b.csEvltr.recordTransition(oldSCState, newSCState) + + switch s { + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. 
+ delete(b.scStates, sc) + } + + if oldSCState != newSCState { + // Because the picker caches the state of the subconns, we always + // regenerate and update the picker when the effective SubConn state + // changes. + b.regeneratePicker() + b.logger.Infof("Pushing new state %v and picker %p", b.state, b.picker) + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + } + + switch b.state { + case connectivity.Connecting, connectivity.TransientFailure: + // When overall state is TransientFailure, we need to make sure at least + // one SubConn is attempting to connect, otherwise this balancer may + // never get picks if the parent is priority. + // + // Because we report Connecting as the overall state when only one + // SubConn is in TransientFailure, we do the same check for Connecting + // here. + // + // Note that this check also covers deleting SubConns due to address + // change. E.g. if the SubConn attempting to connect is deleted, and the + // overall state is TF. Since there must be at least one SubConn + // attempting to connect, we need to trigger one. But since the deleted + // SubConn will eventually send a shutdown update, this code will run + // and trigger the next SubConn to connect. + for _, v := range b.subConns.Values() { + sc := v.(*subConn) + if sc.isAttemptingToConnect() { + return + } + } + // Trigger a SubConn (this updated SubConn's next SubConn in the ring) + // to connect if nobody is attempting to connect. + sc := nextSkippingDuplicatesSubConn(b.ring, scs) + if sc != nil { + sc.queueConnect() + return + } + // This handles the edge case where we have a single subConn in the + // ring. nextSkippingDuplicatesSubCon() would have returned nil. We + // still need to ensure that some subConn is attempting to connect, in + // order to give the LB policy a chance to move out of + // TRANSIENT_FAILURE. Hence, we try connecting on the current subConn. 
+ scs.queueConnect() + } +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *ringhashBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +func (b *ringhashBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = base.NewErrPicker(b.mergeErrors()) + return + } + b.picker = newPicker(b.ring, b.logger) +} + +func (b *ringhashBalancer) Close() { + b.logger.Infof("Shutdown") +} + +func (b *ringhashBalancer) ExitIdle() { + // ExitIdle implementation is a no-op because connections are either + // triggers from picks or from subConn state changes. +} + +// connectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type connectivityStateEvaluator struct { + sum uint64 + nums [5]uint64 +} + +// recordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If there is at least one subchannel in READY state, report READY. +// - If there are 2 or more subchannels in TRANSIENT_FAILURE state, report TRANSIENT_FAILURE. +// - If there is at least one subchannel in CONNECTING state, report CONNECTING. +// - If there is one subchannel in TRANSIENT_FAILURE and there is more than one subchannel, report state CONNECTING. +// - If there is at least one subchannel in Idle state, report Idle. +// - Otherwise, report TRANSIENT_FAILURE. 
+// +// Note that if there are 1 connecting, 2 transient failure, the overall state +// is transient failure. This is because the second transient failure is a +// fallback of the first failing SubConn, and we want to report transient +// failure to failover to the lower priority. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + cse.nums[state] += updateVal + } + if oldState == connectivity.Shutdown { + // There's technically no transition from Shutdown. But we record a + // Shutdown->Idle transition when a new SubConn is created. + cse.sum++ + } + if newState == connectivity.Shutdown { + cse.sum-- + } + + if cse.nums[connectivity.Ready] > 0 { + return connectivity.Ready + } + if cse.nums[connectivity.TransientFailure] > 1 { + return connectivity.TransientFailure + } + if cse.nums[connectivity.Connecting] > 0 { + return connectivity.Connecting + } + if cse.nums[connectivity.TransientFailure] > 0 && cse.sum > 1 { + return connectivity.Connecting + } + if cse.nums[connectivity.Idle] > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} + +// getWeightAttribute is a convenience function which returns the value of the +// weight attribute stored in the BalancerAttributes field of addr, using the +// weightedroundrobin package. +// +// When used in the xDS context, the weight attribute is guaranteed to be +// non-zero. But, when used in a non-xDS context, the weight attribute could be +// unset. A Default of 1 is used in the latter case. 
+func getWeightAttribute(addr resolver.Address) uint32 { + w := weightedroundrobin.GetAddrInfo(addr).Weight + if w == 0 { + return 1 + } + return w +} diff --git a/xds/internal/balancer/ringhash/ringhash_test.go b/xds/internal/balancer/ringhash/ringhash_test.go new file mode 100644 index 000000000000..e5b10556e982 --- /dev/null +++ b/xds/internal/balancer/ringhash/ringhash_test.go @@ -0,0 +1,551 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package ringhash + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal" +) + +var ( + cmpOpts = cmp.Options{ + cmp.AllowUnexported(testutils.TestSubConn{}, ringEntry{}, subConn{}), + cmpopts.IgnoreFields(subConn{}, "mu"), + } +) + +const ( + defaultTestTimeout = 10 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond + + testBackendAddrsCount = 12 +) + +var ( + testBackendAddrStrs []string + testConfig = &LBConfig{MinRingSize: 1, MaxRingSize: 10} +) + +func init() { + for i := 0; i < testBackendAddrsCount; i++ { + testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)) + } +} + +func ctxWithHash(h uint64) context.Context { + return SetRequestHash(context.Background(), h) +} + +// setupTest creates the balancer, and does an initial sanity check. 
+func setupTest(t *testing.T, addrs []resolver.Address) (*testutils.TestClientConn, balancer.Balancer, balancer.Picker) { + t.Helper() + cc := testutils.NewTestClientConn(t) + builder := balancer.Get(Name) + b := builder.Build(cc, balancer.BuildOptions{}) + if b == nil { + t.Fatalf("builder.Build(%s) failed and returned nil", Name) + } + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: addrs}, + BalancerConfig: testConfig, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + + for _, addr := range addrs { + addr1 := <-cc.NewSubConnAddrsCh + if want := []resolver.Address{addr}; !cmp.Equal(addr1, want, cmp.AllowUnexported(attributes.Attributes{})) { + t.Fatalf("got unexpected new subconn addrs: %v", cmp.Diff(addr1, want, cmp.AllowUnexported(attributes.Attributes{}))) + } + sc1 := <-cc.NewSubConnCh + // All the SubConns start in Idle, and should not Connect(). + select { + case <-sc1.(*testutils.TestSubConn).ConnectCh: + t.Errorf("unexpected Connect() from SubConn %v", sc1) + case <-time.After(defaultTestShortTimeout): + } + } + + // Should also have a picker, with all SubConns in Idle. + p1 := <-cc.NewPickerCh + return cc, b, p1 +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestUpdateClientConnState_NewRingSize tests the scenario where the ringhash +// LB policy receives new configuration which specifies new values for the ring +// min and max sizes. The test verifies that a new ring is created and a new +// picker is sent to the ClientConn. 
+func (s) TestUpdateClientConnState_NewRingSize(t *testing.T) { + origMinRingSize, origMaxRingSize := 1, 10 // Configured from `testConfig` in `setupTest` + newMinRingSize, newMaxRingSize := 20, 100 + + addrs := []resolver.Address{{Addr: testBackendAddrStrs[0]}} + cc, b, p1 := setupTest(t, addrs) + ring1 := p1.(*picker).ring + if ringSize := len(ring1.items); ringSize < origMinRingSize || ringSize > origMaxRingSize { + t.Fatalf("Ring created with size %d, want between [%d, %d]", ringSize, origMinRingSize, origMaxRingSize) + } + + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: addrs}, + BalancerConfig: &LBConfig{MinRingSize: uint64(newMinRingSize), MaxRingSize: uint64(newMaxRingSize)}, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + + var ring2 *ring + select { + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout when waiting for a picker update after a configuration update") + case p2 := <-cc.NewPickerCh: + ring2 = p2.(*picker).ring + } + if ringSize := len(ring2.items); ringSize < newMinRingSize || ringSize > newMaxRingSize { + t.Fatalf("Ring created with size %d, want between [%d, %d]", ringSize, newMinRingSize, newMaxRingSize) + } +} + +func (s) TestOneSubConn(t *testing.T) { + wantAddr1 := resolver.Address{Addr: testBackendAddrStrs[0]} + cc, b, p0 := setupTest(t, []resolver.Address{wantAddr1}) + ring0 := p0.(*picker).ring + + firstHash := ring0.items[0].hash + // firstHash-1 will pick the first (and only) SubConn from the ring. + testHash := firstHash - 1 + // The first pick should be queued, and should trigger Connect() on the only + // SubConn. 
+	if _, err := p0.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}); err != balancer.ErrNoSubConnAvailable {
+		t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable)
+	}
+	sc0 := ring0.items[0].sc.sc
+	select {
+	case <-sc0.(*testutils.TestSubConn).ConnectCh:
+	case <-time.After(defaultTestTimeout):
+		t.Errorf("timeout waiting for Connect() from SubConn %v", sc0)
+	}
+
+	// Send state updates to Ready.
+	b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
+	b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready})
+
+	// Test pick with one backend.
+	p1 := <-cc.NewPickerCh
+	for i := 0; i < 5; i++ {
+		gotSCSt, _ := p1.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)})
+		if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) {
+			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0)
+		}
+	}
+}
+
+// TestThreeSubConnsAffinity covers that there are 3 SubConns, RPCs with the
+// same hash always pick the same SubConn. When the one picked is down, another
+// one will be picked.
+func (s) TestThreeSubConnsAffinity(t *testing.T) {
+	wantAddrs := []resolver.Address{
+		{Addr: testBackendAddrStrs[0]},
+		{Addr: testBackendAddrStrs[1]},
+		{Addr: testBackendAddrStrs[2]},
+	}
+	cc, b, p0 := setupTest(t, wantAddrs)
+	// This test doesn't update addresses, so this ring will be used by all the
+	// pickers.
+	ring0 := p0.(*picker).ring
+
+	firstHash := ring0.items[0].hash
+	// firstHash+1 will pick the second SubConn from the ring.
+	testHash := firstHash + 1
+	// The first pick should be queued, and should trigger Connect() on the only
+	// SubConn.
+	if _, err := p0.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}); err != balancer.ErrNoSubConnAvailable {
+		t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable)
+	}
+	// The picked SubConn should be the second in the ring.
+ sc0 := ring0.items[1].sc.sc + select { + case <-sc0.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc0) + } + + // Send state updates to Ready. + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + p1 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p1.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}) + if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) + } + } + + // Turn down the subConn in use. + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + p2 := <-cc.NewPickerCh + // Pick with the same hash should be queued, because the SubConn after the + // first picked is Idle. + if _, err := p2.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable) + } + + // The third SubConn in the ring should connect. + sc1 := ring0.items[2].sc.sc + select { + case <-sc1.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc1) + } + + // Send state updates to Ready. + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // New picks should all return this SubConn. 
+	p3 := <-cc.NewPickerCh
+	for i := 0; i < 5; i++ {
+		gotSCSt, _ := p3.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)})
+		if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) {
+			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1)
+		}
+	}
+
+	// Now, after backoff, the first picked SubConn will turn Idle.
+	b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle})
+	// The picks above should have queued Connect() for the first picked
+	// SubConn, so this Idle state change will trigger a Connect().
+	select {
+	case <-sc0.(*testutils.TestSubConn).ConnectCh:
+	case <-time.After(defaultTestTimeout):
+		t.Errorf("timeout waiting for Connect() from SubConn %v", sc0)
+	}
+
+	// After the first picked SubConn turns Ready, new picks should return it
+	// again (even though the second picked SubConn is also Ready).
+	b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting})
+	b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready})
+	p4 := <-cc.NewPickerCh
+	for i := 0; i < 5; i++ {
+		gotSCSt, _ := p4.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)})
+		if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) {
+			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0)
+		}
+	}
+}
+
+// TestThreeSubConnsAffinityMultiple covers that there are 3 SubConns, RPCs
+// with the same hash always pick the same SubConn. Then try different hash to
+// pick another backend, and verify the first hash still picks the first
+// backend.
+func (s) TestThreeSubConnsAffinityMultiple(t *testing.T) {
+	wantAddrs := []resolver.Address{
+		{Addr: testBackendAddrStrs[0]},
+		{Addr: testBackendAddrStrs[1]},
+		{Addr: testBackendAddrStrs[2]},
+	}
+	cc, b, p0 := setupTest(t, wantAddrs)
+	// This test doesn't update addresses, so this ring will be used by all the
+	// pickers.
+ ring0 := p0.(*picker).ring + + firstHash := ring0.items[0].hash + // firstHash+1 will pick the second SubConn from the ring. + testHash := firstHash + 1 + // The first pick should be queued, and should trigger Connect() on the only + // SubConn. + if _, err := p0.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable) + } + sc0 := ring0.items[1].sc.sc + select { + case <-sc0.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc0) + } + + // Send state updates to Ready. + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // First hash should always pick sc0. + p1 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p1.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}) + if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) + } + } + + secondHash := ring0.items[1].hash + // secondHash+1 will pick the third SubConn from the ring. + testHash2 := secondHash + 1 + if _, err := p0.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash2)}); err != balancer.ErrNoSubConnAvailable { + t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable) + } + sc1 := ring0.items[2].sc.sc + select { + case <-sc1.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestTimeout): + t.Errorf("timeout waiting for Connect() from SubConn %v", sc1) + } + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) + + // With the new generated picker, hash2 always picks sc1. 
+ p2 := <-cc.NewPickerCh + for i := 0; i < 5; i++ { + gotSCSt, _ := p2.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash2)}) + if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) + } + } + // But the first hash still picks sc0. + for i := 0; i < 5; i++ { + gotSCSt, _ := p2.Pick(balancer.PickInfo{Ctx: ctxWithHash(testHash)}) + if !cmp.Equal(gotSCSt.SubConn, sc0, cmp.AllowUnexported(testutils.TestSubConn{})) { + t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0) + } + } +} + +func (s) TestAddrWeightChange(t *testing.T) { + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0]}, + {Addr: testBackendAddrStrs[1]}, + {Addr: testBackendAddrStrs[2]}, + } + cc, b, p0 := setupTest(t, wantAddrs) + ring0 := p0.(*picker).ring + + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: wantAddrs}, + BalancerConfig: testConfig, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + select { + case <-cc.NewPickerCh: + t.Fatalf("unexpected picker after UpdateClientConn with the same addresses") + case <-time.After(defaultTestShortTimeout): + } + + // Delete an address, should send a new Picker. 
+ if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + {Addr: testBackendAddrStrs[0]}, + {Addr: testBackendAddrStrs[1]}, + }}, + BalancerConfig: testConfig, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + var p1 balancer.Picker + select { + case p1 = <-cc.NewPickerCh: + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for picker after UpdateClientConn with different addresses") + } + ring1 := p1.(*picker).ring + if ring1 == ring0 { + t.Fatalf("new picker after removing address has the same ring as before, want different") + } + + // Another update with the same addresses, but different weight. + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: []resolver.Address{ + {Addr: testBackendAddrStrs[0]}, + weightedroundrobin.SetAddrInfo( + resolver.Address{Addr: testBackendAddrStrs[1]}, + weightedroundrobin.AddrInfo{Weight: 2}), + }}, + BalancerConfig: testConfig, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + var p2 balancer.Picker + select { + case p2 = <-cc.NewPickerCh: + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for picker after UpdateClientConn with different addresses") + } + if p2.(*picker).ring == ring1 { + t.Fatalf("new picker after changing address weight has the same ring as before, want different") + } +} + +// TestSubConnToConnectWhenOverallTransientFailure covers the situation when the +// overall state is TransientFailure, the SubConns turning Idle will trigger the +// next SubConn in the ring to Connect(). But not when the overall state is not +// TransientFailure. 
+func (s) TestSubConnToConnectWhenOverallTransientFailure(t *testing.T) { + wantAddrs := []resolver.Address{ + {Addr: testBackendAddrStrs[0]}, + {Addr: testBackendAddrStrs[1]}, + {Addr: testBackendAddrStrs[2]}, + } + _, b, p0 := setupTest(t, wantAddrs) + ring0 := p0.(*picker).ring + + // Turn the first subconn to transient failure. + sc0 := ring0.items[0].sc.sc + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + b.UpdateSubConnState(sc0, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + + // It will trigger the second subconn to connect (because overall state is + // Connect (when one subconn is TF)). + sc1 := ring0.items[1].sc.sc + select { + case <-sc1.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestShortTimeout): + t.Fatalf("timeout waiting for Connect() from SubConn %v", sc1) + } + + // Turn the second subconn to TF. This will set the overall state to TF. + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + b.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + + // It will trigger the third subconn to connect. + sc2 := ring0.items[2].sc.sc + select { + case <-sc2.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestShortTimeout): + t.Fatalf("timeout waiting for Connect() from SubConn %v", sc2) + } + + // Turn the third subconn to TF. This will set the overall state to TF. + b.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + b.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + + // It will trigger the first subconn to connect. + select { + case <-sc0.(*testutils.TestSubConn).ConnectCh: + case <-time.After(defaultTestShortTimeout): + t.Fatalf("timeout waiting for Connect() from SubConn %v", sc0) + } + + // Turn the third subconn to TF again. 
+ b.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.TransientFailure}) + b.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Idle}) + + // This will not trigger any new Connect() on the SubConns, because sc0 is + // still attempting to connect, and we only need one SubConn to connect. + select { + case <-sc0.(*testutils.TestSubConn).ConnectCh: + t.Fatalf("unexpected Connect() from SubConn %v", sc0) + case <-sc1.(*testutils.TestSubConn).ConnectCh: + t.Fatalf("unexpected Connect() from SubConn %v", sc1) + case <-sc2.(*testutils.TestSubConn).ConnectCh: + t.Fatalf("unexpected Connect() from SubConn %v", sc2) + case <-time.After(defaultTestShortTimeout): + } +} + +func (s) TestConnectivityStateEvaluatorRecordTransition(t *testing.T) { + tests := []struct { + name string + from, to []connectivity.State + want connectivity.State + }{ + { + name: "one ready", + from: []connectivity.State{connectivity.Idle}, + to: []connectivity.State{connectivity.Ready}, + want: connectivity.Ready, + }, + { + name: "one connecting", + from: []connectivity.State{connectivity.Idle}, + to: []connectivity.State{connectivity.Connecting}, + want: connectivity.Connecting, + }, + { + name: "one ready one transient failure", + from: []connectivity.State{connectivity.Idle, connectivity.Idle}, + to: []connectivity.State{connectivity.Ready, connectivity.TransientFailure}, + want: connectivity.Ready, + }, + { + name: "one connecting one transient failure", + from: []connectivity.State{connectivity.Idle, connectivity.Idle}, + to: []connectivity.State{connectivity.Connecting, connectivity.TransientFailure}, + want: connectivity.Connecting, + }, + { + name: "one connecting two transient failure", + from: []connectivity.State{connectivity.Idle, connectivity.Idle, connectivity.Idle}, + to: []connectivity.State{connectivity.Connecting, connectivity.TransientFailure, connectivity.TransientFailure}, + want: connectivity.TransientFailure, + }, + } + for 
_, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cse := &connectivityStateEvaluator{} + var got connectivity.State + for i, fff := range tt.from { + ttt := tt.to[i] + got = cse.recordTransition(fff, ttt) + } + if got != tt.want { + t.Errorf("recordTransition() = %v, want %v", got, tt.want) + } + }) + } +} + +// TestAddrBalancerAttributesChange tests the case where the ringhash balancer +// receives a ClientConnUpdate with the same config and addresses as received in +// the previous update. Although the `BalancerAttributes` contents are the same, +// the pointer is different. This test verifies that subConns are not recreated +// in this scenario. +func (s) TestAddrBalancerAttributesChange(t *testing.T) { + addrs1 := []resolver.Address{internal.SetLocalityID(resolver.Address{Addr: testBackendAddrStrs[0]}, internal.LocalityID{Region: "americas"})} + cc, b, _ := setupTest(t, addrs1) + + addrs2 := []resolver.Address{internal.SetLocalityID(resolver.Address{Addr: testBackendAddrStrs[0]}, internal.LocalityID{Region: "americas"})} + if err := b.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: resolver.State{Addresses: addrs2}, + BalancerConfig: testConfig, + }); err != nil { + t.Fatalf("UpdateClientConnState returned err: %v", err) + } + select { + case <-cc.NewSubConnCh: + t.Fatal("new subConn created for an update with the same addresses") + case <-time.After(defaultTestShortTimeout): + } +} diff --git a/xds/internal/balancer/ringhash/util.go b/xds/internal/balancer/ringhash/util.go new file mode 100644 index 000000000000..92bb3ae5b791 --- /dev/null +++ b/xds/internal/balancer/ringhash/util.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package ringhash + +import "context" + +type clusterKey struct{} + +func getRequestHash(ctx context.Context) uint64 { + requestHash, _ := ctx.Value(clusterKey{}).(uint64) + return requestHash +} + +// GetRequestHashForTesting returns the request hash in the context; to be used +// for testing only. +func GetRequestHashForTesting(ctx context.Context) uint64 { + return getRequestHash(ctx) +} + +// SetRequestHash adds the request hash to the context for use in Ring Hash Load +// Balancing. +func SetRequestHash(ctx context.Context, requestHash uint64) context.Context { + return context.WithValue(ctx, clusterKey{}, requestHash) +} diff --git a/xds/internal/balancer/weightedtarget/weightedtarget_test.go b/xds/internal/balancer/weightedtarget/weightedtarget_test.go deleted file mode 100644 index 7f9e566ca5b5..000000000000 --- a/xds/internal/balancer/weightedtarget/weightedtarget_test.go +++ /dev/null @@ -1,225 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package weightedtarget - -import ( - "encoding/json" - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/attributes" - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/roundrobin" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/hierarchy" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" - "google.golang.org/grpc/xds/internal/balancer/balancergroup" - "google.golang.org/grpc/xds/internal/testutils" -) - -type testConfigBalancerBuilder struct { - balancer.Builder -} - -func newTestConfigBalancerBuilder() *testConfigBalancerBuilder { - return &testConfigBalancerBuilder{ - Builder: balancer.Get(roundrobin.Name), - } -} - -func (t *testConfigBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - rr := t.Builder.Build(cc, opts) - return &testConfigBalancer{ - Balancer: rr, - } -} - -const testConfigBalancerName = "test_config_balancer" - -func (t *testConfigBalancerBuilder) Name() string { - return testConfigBalancerName -} - -type stringBalancerConfig struct { - serviceconfig.LoadBalancingConfig - s string -} - -func (t *testConfigBalancerBuilder) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - // Return string without quotes. - return stringBalancerConfig{s: string(c[1 : len(c)-1])}, nil -} - -// testConfigBalancer is a roundrobin balancer, but it takes the balancer config -// string and append it to the backend addresses. 
-type testConfigBalancer struct { - balancer.Balancer -} - -func (b *testConfigBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - c, ok := s.BalancerConfig.(stringBalancerConfig) - if !ok { - return fmt.Errorf("unexpected balancer config with type %T", s.BalancerConfig) - } - oneMoreAddr := resolver.Address{Addr: c.s} - s.BalancerConfig = nil - s.ResolverState.Addresses = append(s.ResolverState.Addresses, oneMoreAddr) - return b.Balancer.UpdateClientConnState(s) -} - -func (b *testConfigBalancer) Close() { - b.Balancer.Close() -} - -var ( - wtbBuilder balancer.Builder - wtbParser balancer.ConfigParser - testBackendAddrStrs []string -) - -const testBackendAddrsCount = 12 - -func init() { - balancer.Register(newTestConfigBalancerBuilder()) - for i := 0; i < testBackendAddrsCount; i++ { - testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)) - } - wtbBuilder = balancer.Get(weightedTargetName) - wtbParser = wtbBuilder.(balancer.ConfigParser) - - balancergroup.DefaultSubBalancerCloseTimeout = time.Millisecond -} - -// TestWeightedTarget covers the cases that a sub-balancer is added and a -// sub-balancer is removed. It verifies that the addresses and balancer configs -// are forwarded to the right sub-balancer. -// -// This test is intended to test the glue code in weighted_target. Most of the -// functionality tests are covered by the balancer group tests. -func TestWeightedTarget(t *testing.T) { - cc := testutils.NewTestClientConn(t) - wtb := wtbBuilder.Build(cc, balancer.BuildOptions{}) - - // Start with "cluster_1: round_robin". - config1, err := wtbParser.ParseConfig([]byte(`{"targets":{"cluster_1":{"weight":1,"childPolicy":[{"round_robin":""}]}}}`)) - if err != nil { - t.Fatalf("failed to parse balancer config: %v", err) - } - - // Send the config, and an address with hierarchy path ["cluster_1"]. 
- wantAddr1 := resolver.Address{Addr: testBackendAddrStrs[0], Attributes: nil} - if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddr1, []string{"cluster_1"}), - }}, - BalancerConfig: config1, - }); err != nil { - t.Fatalf("failed to update ClientConn state: %v", err) - } - - // Verify that a subconn is created with the address, and the hierarchy path - // in the address is cleared. - addr1 := <-cc.NewSubConnAddrsCh - if want := []resolver.Address{ - hierarchy.Set(wantAddr1, []string{}), - }; !cmp.Equal(addr1, want, cmp.AllowUnexported(attributes.Attributes{})) { - t.Fatalf("got unexpected new subconn addrs: %v", cmp.Diff(addr1, want, cmp.AllowUnexported(attributes.Attributes{}))) - } - - // Send subconn state change. - sc1 := <-cc.NewSubConnCh - wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - wtb.UpdateSubConnState(sc1, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test pick with one backend. - p1 := <-cc.NewPickerCh - for i := 0; i < 5; i++ { - gotSCSt, _ := p1.Pick(balancer.PickInfo{}) - if !cmp.Equal(gotSCSt.SubConn, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1) - } - } - - // Remove cluster_1, and add "cluster_2: test_config_balancer". - wantAddr3Str := testBackendAddrStrs[2] - config2, err := wtbParser.ParseConfig([]byte( - fmt.Sprintf(`{"targets":{"cluster_2":{"weight":1,"childPolicy":[{%q:%q}]}}}`, testConfigBalancerName, wantAddr3Str), - )) - if err != nil { - t.Fatalf("failed to parse balancer config: %v", err) - } - - // Send the config, and one address with hierarchy path "cluster_2". 
- wantAddr2 := resolver.Address{Addr: testBackendAddrStrs[1], Attributes: nil} - if err := wtb.UpdateClientConnState(balancer.ClientConnState{ - ResolverState: resolver.State{Addresses: []resolver.Address{ - hierarchy.Set(wantAddr2, []string{"cluster_2"}), - }}, - BalancerConfig: config2, - }); err != nil { - t.Fatalf("failed to update ClientConn state: %v", err) - } - - // Expect the address sent in the address list. The hierarchy path should be - // cleared. - addr2 := <-cc.NewSubConnAddrsCh - if want := []resolver.Address{ - hierarchy.Set(wantAddr2, []string{}), - }; !cmp.Equal(addr2, want, cmp.AllowUnexported(attributes.Attributes{})) { - t.Fatalf("got unexpected new subconn addrs: %v", cmp.Diff(addr2, want, cmp.AllowUnexported(attributes.Attributes{}))) - } - // Expect the other address sent as balancer config. This address doesn't - // have hierarchy path. - wantAddr3 := resolver.Address{Addr: wantAddr3Str, Attributes: nil} - addr3 := <-cc.NewSubConnAddrsCh - if want := []resolver.Address{wantAddr3}; !cmp.Equal(addr3, want, cmp.AllowUnexported(attributes.Attributes{})) { - t.Fatalf("got unexpected new subconn addrs: %v", cmp.Diff(addr3, want, cmp.AllowUnexported(attributes.Attributes{}))) - } - - // The subconn for cluster_1 should be removed. 
- scToRemove := <-cc.RemoveSubConnCh - if !cmp.Equal(scToRemove, sc1, cmp.AllowUnexported(testutils.TestSubConn{})) { - t.Fatalf("RemoveSubConn, want %v, got %v", sc1, scToRemove) - } - wtb.UpdateSubConnState(scToRemove, balancer.SubConnState{ConnectivityState: connectivity.Shutdown}) - - sc2 := <-cc.NewSubConnCh - wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - wtb.UpdateSubConnState(sc2, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - sc3 := <-cc.NewSubConnCh - wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Connecting}) - wtb.UpdateSubConnState(sc3, balancer.SubConnState{ConnectivityState: connectivity.Ready}) - - // Test roundrobin pick with backends in cluster_2. - p2 := <-cc.NewPickerCh - want := []balancer.SubConn{sc2, sc3} - if err := testutils.IsRoundRobin(want, subConnFromPicker(p2)); err != nil { - t.Fatalf("want %v, got %v", want, err) - } -} - -func subConnFromPicker(p balancer.Picker) func() balancer.SubConn { - return func() balancer.SubConn { - scst, _ := p.Pick(balancer.PickInfo{}) - return scst.SubConn - } -} diff --git a/xds/internal/balancer/wrrlocality/balancer.go b/xds/internal/balancer/wrrlocality/balancer.go new file mode 100644 index 000000000000..4df2e4ed0086 --- /dev/null +++ b/xds/internal/balancer/wrrlocality/balancer.go @@ -0,0 +1,201 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package wrrlocality provides an implementation of the wrr locality LB policy, +// as defined in [A52 - xDS Custom LB Policies]. +// +// [A52 - xDS Custom LB Policies]: https://github.com/grpc/proposal/blob/master/A52-xds-custom-lb-policies.md +package wrrlocality + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/weightedtarget" + "google.golang.org/grpc/internal/grpclog" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal" +) + +// Name is the name of wrr_locality balancer. +const Name = "xds_wrr_locality_experimental" + +func init() { + balancer.Register(bb{}) +} + +type bb struct{} + +func (bb) Name() string { + return Name +} + +// LBConfig is the config for the wrr locality balancer. +type LBConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + // ChildPolicy is the config for the child policy. + ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"` +} + +// To plumb in a different child in tests. +var weightedTargetName = weightedtarget.Name + +func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + builder := balancer.Get(weightedTargetName) + if builder == nil { + // Shouldn't happen, registered through imported weighted target, + // defensive programming. + return nil + } + + // Doesn't need to intercept any balancer.ClientConn operations; pass + // through by just giving cc to child balancer. + wtb := builder.Build(cc, bOpts) + if wtb == nil { + // shouldn't happen, defensive programming. + return nil + } + wtbCfgParser, ok := builder.(balancer.ConfigParser) + if !ok { + // Shouldn't happen, imported weighted target builder has this method. 
+ return nil + } + wrrL := &wrrLocalityBalancer{ + child: wtb, + childParser: wtbCfgParser, + } + + wrrL.logger = prefixLogger(wrrL) + wrrL.logger.Infof("Created") + return wrrL +} + +func (bb) ParseConfig(s json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg *LBConfig + if err := json.Unmarshal(s, &lbCfg); err != nil { + return nil, fmt.Errorf("xds_wrr_locality: invalid LBConfig: %s, error: %v", string(s), err) + } + if lbCfg == nil || lbCfg.ChildPolicy == nil { + return nil, errors.New("xds_wrr_locality: invalid LBConfig: child policy field must be set") + } + return lbCfg, nil +} + +type attributeKey struct{} + +// Equal allows the values to be compared by Attributes.Equal. +func (a AddrInfo) Equal(o interface{}) bool { + oa, ok := o.(AddrInfo) + return ok && oa.LocalityWeight == a.LocalityWeight +} + +// AddrInfo is the locality weight of the locality an address is a part of. +type AddrInfo struct { + LocalityWeight uint32 +} + +// SetAddrInfo returns a copy of addr in which the BalancerAttributes field is +// updated with AddrInfo. +func SetAddrInfo(addr resolver.Address, addrInfo AddrInfo) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(attributeKey{}, addrInfo) + return addr +} + +func (a AddrInfo) String() string { + return fmt.Sprintf("Locality Weight: %d", a.LocalityWeight) +} + +// getAddrInfo returns the AddrInfo stored in the BalancerAttributes field of +// addr. Returns false if no AddrInfo found. +func getAddrInfo(addr resolver.Address) (AddrInfo, bool) { + v := addr.BalancerAttributes.Value(attributeKey{}) + ai, ok := v.(AddrInfo) + return ai, ok +} + +// wrrLocalityBalancer wraps a weighted target balancer, and builds +// configuration for the weighted target once it receives configuration +// specifying the weighted target child balancer and locality weight +// information. 
+type wrrLocalityBalancer struct {
+	// child will be a weighted target balancer, and will be built at
+	// wrrLocalityBalancer build time. Other than preparing configuration, other
+	// balancer operations are simply pass through.
+	child balancer.Balancer
+
+	childParser balancer.ConfigParser
+
+	logger *grpclog.PrefixLogger
+}
+
+func (b *wrrLocalityBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
+	lbCfg, ok := s.BalancerConfig.(*LBConfig)
+	if !ok {
+		b.logger.Errorf("Received config with unexpected type %T: %v", s.BalancerConfig, s.BalancerConfig)
+		return balancer.ErrBadResolverState
+	}
+
+	weightedTargets := make(map[string]weightedtarget.Target)
+	for _, addr := range s.ResolverState.Addresses {
+		// This get of LocalityID could potentially return a zero value. This
+		// shouldn't happen though (this attribute that is set actually gets
+		// used to build localities in the first place), and thus don't error
+		// out, and just build a weighted target with undefined behavior.
+		locality, err := internal.GetLocalityID(addr).ToString()
+		if err != nil {
+			// Should never happen.
+			logger.Errorf("Failed to marshal LocalityID: %v, skipping this locality in weighted target", err)
+		}
+		ai, ok := getAddrInfo(addr)
+		if !ok {
+			return fmt.Errorf("xds_wrr_locality: missing locality weight information in address %q", addr)
+		}
+		weightedTargets[locality] = weightedtarget.Target{Weight: ai.LocalityWeight, ChildPolicy: lbCfg.ChildPolicy}
+	}
+	wtCfg := &weightedtarget.LBConfig{Targets: weightedTargets}
+	wtCfgJSON, err := json.Marshal(wtCfg)
+	if err != nil {
+		// Shouldn't happen.
+ return fmt.Errorf("xds_wrr_locality: error marshalling prepared config: %v", wtCfg) + } + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.childParser.ParseConfig(wtCfgJSON); err != nil { + return fmt.Errorf("xds_wrr_locality: config generated %v is invalid: %v", wtCfgJSON, err) + } + + return b.child.UpdateClientConnState(balancer.ClientConnState{ + ResolverState: s.ResolverState, + BalancerConfig: sc, + }) +} + +func (b *wrrLocalityBalancer) ResolverError(err error) { + b.child.ResolverError(err) +} + +func (b *wrrLocalityBalancer) UpdateSubConnState(sc balancer.SubConn, scState balancer.SubConnState) { + b.child.UpdateSubConnState(sc, scState) +} + +func (b *wrrLocalityBalancer) Close() { + b.child.Close() +} diff --git a/xds/internal/balancer/wrrlocality/balancer_test.go b/xds/internal/balancer/wrrlocality/balancer_test.go new file mode 100644 index 000000000000..f0da7413bdb8 --- /dev/null +++ b/xds/internal/balancer/wrrlocality/balancer_test.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package wrrlocality + +import ( + "context" + "encoding/json" + "errors" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedtarget" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/grpctest" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal" +) + +const ( + defaultTestTimeout = 5 * time.Second +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestParseConfig(t *testing.T) { + const errParseConfigName = "errParseConfigBalancer" + stub.Register(errParseConfigName, stub.BalancerFuncs{ + ParseConfig: func(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return nil, errors.New("some error") + }, + }) + + parser := bb{} + tests := []struct { + name string + input string + wantCfg serviceconfig.LoadBalancingConfig + wantErr string + }{ + { + name: "happy-case-round robin-child", + input: `{"childPolicy": [{"round_robin": {}}]}`, + wantCfg: &LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: roundrobin.Name, + }, + }, + }, + { + name: "invalid-json", + input: "{{invalidjson{{", + wantErr: "invalid character", + }, + + { + name: "child-policy-field-isn't-set", + input: `{}`, + wantErr: "child policy field must be set", + }, + { + name: "child-policy-type-is-empty", + input: `{"childPolicy": []}`, + wantErr: "invalid loadBalancingConfig: no supported policies found in []", + }, + { + name: "child-policy-empty-config", + input: `{"childPolicy": [{"": {}}]}`, + wantErr: "invalid loadBalancingConfig: no supported policies found in []", + }, + { + name: "child-policy-type-isn't-registered", + 
input: `{"childPolicy": [{"doesNotExistBalancer": {"cluster": "test_cluster"}}]}`, + wantErr: "invalid loadBalancingConfig: no supported policies found in [doesNotExistBalancer]", + }, + { + name: "child-policy-config-is-invalid", + input: `{"childPolicy": [{"errParseConfigBalancer": {"cluster": "test_cluster"}}]}`, + wantErr: "error parsing loadBalancingConfig for policy \"errParseConfigBalancer\"", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + gotCfg, gotErr := parser.ParseConfig(json.RawMessage(test.input)) + // Substring match makes this very tightly coupled to the + // internalserviceconfig.BalancerConfig error strings. However, it + // is important to distinguish the different types of error messages + // possible as the parser has a few defined buckets of ways it can + // error out. + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("ParseConfig(%v) = %v, wantErr %v", test.input, gotErr, test.wantErr) + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("ParseConfig(%v) = %v, wantErr %v", test.input, gotErr, test.wantErr) + } + if test.wantErr != "" { + return + } + if diff := cmp.Diff(gotCfg, test.wantCfg); diff != "" { + t.Fatalf("ParseConfig(%v) got unexpected output, diff (-got +want): %v", test.input, diff) + } + }) + } +} + +// TestUpdateClientConnState tests the UpdateClientConnState method of the +// wrr_locality_experimental balancer. This UpdateClientConn operation should +// take the localities and their weights in the addresses passed in, alongside +// the endpoint picking policy defined in the Balancer Config and construct a +// weighted target configuration corresponding to these inputs. +func (s) TestUpdateClientConnState(t *testing.T) { + // Configure the stub balancer defined below as the child policy of + // wrrLocalityBalancer. 
+ cfgCh := testutils.NewChannel() + oldWeightedTargetName := weightedTargetName + defer func() { + weightedTargetName = oldWeightedTargetName + }() + weightedTargetName = "fake_weighted_target" + stub.Register("fake_weighted_target", stub.BalancerFuncs{ + ParseConfig: func(c json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg weightedtarget.LBConfig + if err := json.Unmarshal(c, &cfg); err != nil { + return nil, err + } + return &cfg, nil + }, + UpdateClientConnState: func(bd *stub.BalancerData, ccs balancer.ClientConnState) error { + wtCfg, ok := ccs.BalancerConfig.(*weightedtarget.LBConfig) + if !ok { + return errors.New("child received config that was not a weighted target config") + } + defer cfgCh.Send(wtCfg) + return nil + }, + }) + + builder := balancer.Get(Name) + if builder == nil { + t.Fatalf("balancer.Get(%q) returned nil", Name) + } + tcc := testutils.NewTestClientConn(t) + bal := builder.Build(tcc, balancer.BuildOptions{}) + defer bal.Close() + wrrL := bal.(*wrrLocalityBalancer) + + // Create the addresses with two localities with certain locality weights. + // This represents what addresses the wrr_locality balancer will receive in + // UpdateClientConnState. 
+ addr1 := resolver.Address{ + Addr: "locality-1", + } + addr1 = internal.SetLocalityID(addr1, internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }) + addr1 = SetAddrInfo(addr1, AddrInfo{LocalityWeight: 2}) + + addr2 := resolver.Address{ + Addr: "locality-2", + } + addr2 = internal.SetLocalityID(addr2, internal.LocalityID{ + Region: "region-2", + Zone: "zone-2", + SubZone: "subzone-2", + }) + addr2 = SetAddrInfo(addr2, AddrInfo{LocalityWeight: 1}) + addrs := []resolver.Address{addr1, addr2} + + err := wrrL.UpdateClientConnState(balancer.ClientConnState{ + BalancerConfig: &LBConfig{ + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + ResolverState: resolver.State{ + Addresses: addrs, + }, + }) + if err != nil { + t.Fatalf("Unexpected error from UpdateClientConnState: %v", err) + } + + // Note that these inline strings declared as the key in Targets built from + // Locality ID are not exactly what is shown in the example in the gRFC. + // However, this is an implementation detail that does not affect + // correctness (confirmed with Java team). The important thing is to get + // those three pieces of information region, zone, and subzone down to the + // child layer. 
+ wantWtCfg := &weightedtarget.LBConfig{ + Targets: map[string]weightedtarget.Target{ + "{\"region\":\"region-1\",\"zone\":\"zone-1\",\"subZone\":\"subzone-1\"}": { + Weight: 2, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + "{\"region\":\"region-2\",\"zone\":\"zone-2\",\"subZone\":\"subzone-2\"}": { + Weight: 1, + ChildPolicy: &internalserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cfg, err := cfgCh.Receive(ctx) + if err != nil { + t.Fatalf("No signal received from UpdateClientConnState() on the child: %v", err) + } + + gotWtCfg, ok := cfg.(*weightedtarget.LBConfig) + if !ok { + // Shouldn't happen - only sends a config on this channel. + t.Fatalf("Unexpected config type: %T", gotWtCfg) + } + + if diff := cmp.Diff(gotWtCfg, wantWtCfg); diff != "" { + t.Fatalf("Child received unexpected config, diff (-got, +want): %v", diff) + } +} diff --git a/xds/internal/balancer/edsbalancer/logging.go b/xds/internal/balancer/wrrlocality/logging.go similarity index 82% rename from xds/internal/balancer/edsbalancer/logging.go rename to xds/internal/balancer/wrrlocality/logging.go index be4d0a512d16..42ccea0a92b2 100644 --- a/xds/internal/balancer/edsbalancer/logging.go +++ b/xds/internal/balancer/wrrlocality/logging.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2023 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,7 +16,7 @@ * */ -package edsbalancer +package wrrlocality import ( "fmt" @@ -25,10 +25,10 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" ) -const prefix = "[eds-lb %p] " +const prefix = "[wrrlocality-lb %p] " var logger = grpclog.Component("xds") -func prefixLogger(p *edsBalancer) *internalgrpclog.PrefixLogger { +func prefixLogger(p *wrrLocalityBalancer) *internalgrpclog.PrefixLogger { return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(prefix, p)) } diff --git a/xds/internal/client/bootstrap/bootstrap.go b/xds/internal/client/bootstrap/bootstrap.go deleted file mode 100644 index f32c698b4f55..000000000000 --- a/xds/internal/client/bootstrap/bootstrap.go +++ /dev/null @@ -1,322 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package bootstrap provides the functionality to initialize certain aspects -// of an xDS client by reading a bootstrap file. 
-package bootstrap - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/google" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/credentials/tls/certprovider" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal/version" -) - -const ( - // The "server_features" field in the bootstrap file contains a list of - // features supported by the server. A value of "xds_v3" indicates that the - // server supports the v3 version of the xDS transport protocol. - serverFeaturesV3 = "xds_v3" - - // Type name for Google default credentials. - credsGoogleDefault = "google_default" - credsInsecure = "insecure" - gRPCUserAgentName = "gRPC Go" - clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" -) - -var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version) - -// For overriding in unit tests. -var bootstrapFileReadFunc = ioutil.ReadFile - -// Config provides the xDS client with several key bits of information that it -// requires in its interaction with the management server. The Config is -// initialized from the bootstrap file. -type Config struct { - // BalancerName is the name of the management server to connect to. - // - // The bootstrap file contains a list of servers (with name+creds), but we - // pick the first one. - BalancerName string - // Creds contains the credentials to be used while talking to the xDS - // server, as a grpc.DialOption. - Creds grpc.DialOption - // TransportAPI indicates the API version of xDS transport protocol to use. 
- // This describes the xDS gRPC endpoint and version of - // DiscoveryRequest/Response used on the wire. - TransportAPI version.TransportAPI - // NodeProto contains the Node proto to be used in xDS requests. The actual - // type depends on the transport protocol version used. - NodeProto proto.Message - // CertProviderConfigs contains a mapping from certificate provider plugin - // instance names to parsed buildable configs. - CertProviderConfigs map[string]*certprovider.BuildableConfig - // ServerListenerResourceNameTemplate is a template for the name of the - // Listener resource to subscribe to for a gRPC server. If the token `%s` is - // present in the string, it will be replaced with the server's listening - // "IP:port" (e.g., "0.0.0.0:8080", "[::]:8080"). For example, a value of - // "example/resource/%s" could become "example/resource/0.0.0.0:8080". - ServerListenerResourceNameTemplate string -} - -type channelCreds struct { - Type string `json:"type"` - Config json.RawMessage `json:"config"` -} - -type xdsServer struct { - ServerURI string `json:"server_uri"` - ChannelCreds []channelCreds `json:"channel_creds"` - ServerFeatures []string `json:"server_features"` -} - -func bootstrapConfigFromEnvVariable() ([]byte, error) { - fName := env.BootstrapFileName - fContent := env.BootstrapFileContent - - // Bootstrap file name has higher priority than bootstrap content. - if fName != "" { - // If file name is set - // - If file not found (or other errors), fail - // - Otherwise, use the content. - // - // Note that even if the content is invalid, we don't failover to the - // file content env variable. 
- logger.Debugf("xds: using bootstrap file with name %q", fName) - return bootstrapFileReadFunc(fName) - } - - if fContent != "" { - return []byte(fContent), nil - } - - return nil, fmt.Errorf("none of the bootstrap environment variables (%q or %q) defined", env.BootstrapFileNameEnv, env.BootstrapFileContentEnv) -} - -// NewConfig returns a new instance of Config initialized by reading the -// bootstrap file found at ${GRPC_XDS_BOOTSTRAP}. -// -// The format of the bootstrap file will be as follows: -// { -// "xds_server": { -// "server_uri": , -// "channel_creds": [ -// { -// "type": , -// "config": -// } -// ], -// "server_features": [ ... ], -// }, -// "node": , -// "certificate_providers" : { -// "default": { -// "plugin_name": "default-plugin-name", -// "config": { default plugin config in JSON } -// }, -// "foo": { -// "plugin_name": "foo", -// "config": { foo plugin config in JSON } -// } -// }, -// "server_listener_resource_name_template": "grpc/server?xds.resource.listening_address=%s" -// } -// -// Currently, we support exactly one type of credential, which is -// "google_default", where we use the host's default certs for transport -// credentials and a Google oauth token for call credentials. -// -// This function tries to process as much of the bootstrap file as possible (in -// the presence of the errors) and may return a Config object with certain -// fields left unspecified, in which case the caller should use some sane -// defaults. 
-func NewConfig() (*Config, error) { - config := &Config{} - - data, err := bootstrapConfigFromEnvVariable() - if err != nil { - return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) - } - logger.Debugf("Bootstrap content: %s", data) - - var jsonData map[string]json.RawMessage - if err := json.Unmarshal(data, &jsonData); err != nil { - return nil, fmt.Errorf("xds: Failed to parse bootstrap config: %v", err) - } - - serverSupportsV3 := false - m := jsonpb.Unmarshaler{AllowUnknownFields: true} - for k, v := range jsonData { - switch k { - case "node": - // We unconditionally convert the JSON into a v3.Node proto. The v3 - // proto does not contain the deprecated field "build_version" from - // the v2 proto. We do not expect the bootstrap file to contain the - // "build_version" field. In any case, the unmarshal will succeed - // because we have set the `AllowUnknownFields` option on the - // unmarshaler. - n := &v3corepb.Node{} - if err := m.Unmarshal(bytes.NewReader(v), n); err != nil { - return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - config.NodeProto = n - case "xds_servers": - var servers []*xdsServer - if err := json.Unmarshal(v, &servers); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - if len(servers) < 1 { - return nil, fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any management server to connect to") - } - xs := servers[0] - config.BalancerName = xs.ServerURI - for _, cc := range xs.ChannelCreds { - // We stop at the first credential type that we support. 
- if cc.Type == credsGoogleDefault { - config.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials()) - break - } else if cc.Type == credsInsecure { - config.Creds = grpc.WithTransportCredentials(insecure.NewCredentials()) - break - } - } - for _, f := range xs.ServerFeatures { - switch f { - case serverFeaturesV3: - serverSupportsV3 = true - } - } - case "certificate_providers": - var providerInstances map[string]json.RawMessage - if err := json.Unmarshal(v, &providerInstances); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - configs := make(map[string]*certprovider.BuildableConfig) - getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder) - for instance, data := range providerInstances { - var nameAndConfig struct { - PluginName string `json:"plugin_name"` - Config json.RawMessage `json:"config"` - } - if err := json.Unmarshal(data, &nameAndConfig); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), instance, err) - } - - name := nameAndConfig.PluginName - parser := getBuilder(nameAndConfig.PluginName) - if parser == nil { - // We ignore plugins that we do not know about. - continue - } - bc, err := parser.ParseConfig(nameAndConfig.Config) - if err != nil { - return nil, fmt.Errorf("xds: Config parsing for plugin %q failed: %v", name, err) - } - configs[instance] = bc - } - config.CertProviderConfigs = configs - case "server_listener_resource_name_template": - if err := json.Unmarshal(v, &config.ServerListenerResourceNameTemplate); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - } - // Do not fail the xDS bootstrap when an unknown field is seen. This can - // happen when an older version client reads a newer version bootstrap - // file with new fields. 
- } - - if config.BalancerName == "" { - return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"]) - } - if config.Creds == nil { - return nil, fmt.Errorf("xds: Required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"]) - } - - // We end up using v3 transport protocol version only if the server supports - // v3, indicated by the presence of "xds_v3" in server_features. The default - // value of the enum type "version.TransportAPI" is v2. - if serverSupportsV3 { - config.TransportAPI = version.TransportV3 - } - - if err := config.updateNodeProto(); err != nil { - return nil, err - } - logger.Infof("Bootstrap config for creating xds-client: %+v", config) - return config, nil -} - -// updateNodeProto updates the node proto read from the bootstrap file. -// -// Node proto in Config contains a v3.Node protobuf message corresponding to the -// JSON contents found in the bootstrap file. This method performs some post -// processing on it: -// 1. If we don't find a nodeProto in the bootstrap file, we create an empty one -// here. That way, callers of this function can always expect that the NodeProto -// field is non-nil. -// 2. If the transport protocol version to be used is not v3, we convert the -// current v3.Node proto in a v2.Node proto. -// 3. Some additional fields which are not expected to be set in the bootstrap -// file are populated here. 
-func (c *Config) updateNodeProto() error { - if c.TransportAPI == version.TransportV3 { - v3, _ := c.NodeProto.(*v3corepb.Node) - if v3 == nil { - v3 = &v3corepb.Node{} - } - v3.UserAgentName = gRPCUserAgentName - v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning) - c.NodeProto = v3 - return nil - } - - v2 := &v2corepb.Node{} - if c.NodeProto != nil { - v3, err := proto.Marshal(c.NodeProto) - if err != nil { - return fmt.Errorf("xds: proto.Marshal(%v): %v", c.NodeProto, err) - } - if err := proto.Unmarshal(v3, v2); err != nil { - return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3, err) - } - } - c.NodeProto = v2 - - // BuildVersion is deprecated, and is replaced by user_agent_name and - // user_agent_version. But the management servers are still using the old - // field, so we will keep both set. - v2.BuildVersion = gRPCVersion - v2.UserAgentName = gRPCUserAgentName - v2.UserAgentVersionType = &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - v2.ClientFeatures = append(v2.ClientFeatures, clientFeatureNoOverprovisioning) - return nil -} diff --git a/xds/internal/client/callback.go b/xds/internal/client/callback.go deleted file mode 100644 index da8e2f62d6c0..000000000000 --- a/xds/internal/client/callback.go +++ /dev/null @@ -1,272 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package client - -type watcherInfoWithUpdate struct { - wi *watchInfo - update interface{} - err error -} - -// scheduleCallback should only be called by methods of watchInfo, which checks -// for watcher states and maintain consistency. -func (c *clientImpl) scheduleCallback(wi *watchInfo, update interface{}, err error) { - c.updateCh.Put(&watcherInfoWithUpdate{ - wi: wi, - update: update, - err: err, - }) -} - -func (c *clientImpl) callCallback(wiu *watcherInfoWithUpdate) { - c.mu.Lock() - // Use a closure to capture the callback and type assertion, to save one - // more switch case. - // - // The callback must be called without c.mu. Otherwise if the callback calls - // another watch() inline, it will cause a deadlock. This leaves a small - // window that a watcher's callback could be called after the watcher is - // canceled, and the user needs to take care of it. - var ccb func() - switch wiu.wi.rType { - case ListenerResource: - if s, ok := c.ldsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.ldsCallback(wiu.update.(ListenerUpdate), wiu.err) } - } - case RouteConfigResource: - if s, ok := c.rdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.rdsCallback(wiu.update.(RouteConfigUpdate), wiu.err) } - } - case ClusterResource: - if s, ok := c.cdsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.cdsCallback(wiu.update.(ClusterUpdate), wiu.err) } - } - case EndpointsResource: - if s, ok := c.edsWatchers[wiu.wi.target]; ok && s[wiu.wi] { - ccb = func() { wiu.wi.edsCallback(wiu.update.(EndpointsUpdate), wiu.err) } - } - } - c.mu.Unlock() - - if ccb != nil { - ccb() - } -} - -// NewListeners is called by the underlying xdsAPIClient when it receives an -// xDS response. -// -// A response can contain multiple resources. 
They will be parsed and put in a -// map from resource name to the resource content. -func (c *clientImpl) NewListeners(updates map[string]ListenerUpdate, metadata UpdateMetadata) { - c.mu.Lock() - defer c.mu.Unlock() - - if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. - c.ldsVersion = metadata.ErrState.Version - for name := range updates { - if _, ok := c.ldsWatchers[name]; ok { - // On error, keep previous version for each resource. But update - // status and error. - mdCopy := c.ldsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - c.ldsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. - } - } - return - } - - // If no error received, the status is ACK. - c.ldsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.ldsWatchers[name]; ok { - // Only send the update if this is not an error. - for wi := range s { - wi.newUpdate(update) - } - // Sync cache. - c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, update) - c.ldsCache[name] = update - c.ldsMD[name] = metadata - } - } - // Resources not in the new update were removed by the server, so delete - // them. - for name := range c.ldsCache { - if _, ok := updates[name]; !ok { - // If resource exists in cache, but not in the new update, delete - // the resource from cache, and also send an resource not found - // error to indicate resource removed. - delete(c.ldsCache, name) - c.ldsMD[name] = UpdateMetadata{Status: ServiceStatusNotExist} - for wi := range c.ldsWatchers[name] { - wi.resourceNotFound() - } - } - } - // When LDS resource is removed, we don't delete corresponding RDS cached - // data. The RDS watch will be canceled, and cache entry is removed when the - // last watch is canceled. -} - -// NewRouteConfigs is called by the underlying xdsAPIClient when it receives an -// xDS response. -// -// A response can contain multiple resources. 
They will be parsed and put in a -// map from resource name to the resource content. -func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdate, metadata UpdateMetadata) { - c.mu.Lock() - defer c.mu.Unlock() - - if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. - c.rdsVersion = metadata.ErrState.Version - for name := range updates { - if _, ok := c.rdsWatchers[name]; ok { - // On error, keep previous version for each resource. But update - // status and error. - mdCopy := c.rdsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - c.rdsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. - } - } - return - } - - // If no error received, the status is ACK. - c.rdsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.rdsWatchers[name]; ok { - // Only send the update if this is not an error. - for wi := range s { - wi.newUpdate(update) - } - // Sync cache. - c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, update) - c.rdsCache[name] = update - c.rdsMD[name] = metadata - } - } -} - -// NewClusters is called by the underlying xdsAPIClient when it receives an xDS -// response. -// -// A response can contain multiple resources. They will be parsed and put in a -// map from resource name to the resource content. -func (c *clientImpl) NewClusters(updates map[string]ClusterUpdate, metadata UpdateMetadata) { - c.mu.Lock() - defer c.mu.Unlock() - - if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. - c.cdsVersion = metadata.ErrState.Version - for name := range updates { - if _, ok := c.cdsWatchers[name]; ok { - // On error, keep previous version for each resource. But update - // status and error. - mdCopy := c.cdsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - c.cdsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. 
- } - } - return - } - - // If no error received, the status is ACK. - c.cdsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.cdsWatchers[name]; ok { - // Only send the update if this is not an error. - for wi := range s { - wi.newUpdate(update) - } - // Sync cache. - c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, update) - c.cdsCache[name] = update - c.cdsMD[name] = metadata - } - } - // Resources not in the new update were removed by the server, so delete - // them. - for name := range c.cdsCache { - if _, ok := updates[name]; !ok { - // If resource exists in cache, but not in the new update, delete it - // from cache, and also send an resource not found error to indicate - // resource removed. - delete(c.cdsCache, name) - c.ldsMD[name] = UpdateMetadata{Status: ServiceStatusNotExist} - for wi := range c.cdsWatchers[name] { - wi.resourceNotFound() - } - } - } - // When CDS resource is removed, we don't delete corresponding EDS cached - // data. The EDS watch will be canceled, and cache entry is removed when the - // last watch is canceled. -} - -// NewEndpoints is called by the underlying xdsAPIClient when it receives an -// xDS response. -// -// A response can contain multiple resources. They will be parsed and put in a -// map from resource name to the resource content. -func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdate, metadata UpdateMetadata) { - c.mu.Lock() - defer c.mu.Unlock() - - if metadata.ErrState != nil { - // On NACK, update overall version to the NACKed resp. - c.edsVersion = metadata.ErrState.Version - for name := range updates { - if _, ok := c.edsWatchers[name]; ok { - // On error, keep previous version for each resource. But update - // status and error. - mdCopy := c.edsMD[name] - mdCopy.ErrState = metadata.ErrState - mdCopy.Status = metadata.Status - c.edsMD[name] = mdCopy - // TODO: send the NACK error to the watcher. 
- } - } - return - } - - // If no error received, the status is ACK. - c.edsVersion = metadata.Version - for name, update := range updates { - if s, ok := c.edsWatchers[name]; ok { - // Only send the update if this is not an error. - for wi := range s { - wi.newUpdate(update) - } - // Sync cache. - c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, update) - c.edsCache[name] = update - c.edsMD[name] = metadata - } - } -} diff --git a/xds/internal/client/cds_test.go b/xds/internal/client/cds_test.go deleted file mode 100644 index 104260759b95..000000000000 --- a/xds/internal/client/cds_test.go +++ /dev/null @@ -1,1031 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package client - -import ( - "regexp" - "testing" - - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" - "github.com/golang/protobuf/proto" - anypb "github.com/golang/protobuf/ptypes/any" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - xdsinternal "google.golang.org/grpc/internal/xds" - "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal/version" - "google.golang.org/protobuf/types/known/wrapperspb" -) - -const ( - clusterName = "clusterName" - serviceName = "service" -) - -var emptyUpdate = ClusterUpdate{ServiceName: "", EnableLRS: false} - -func (s) TestValidateCluster_Failure(t *testing.T) { - tests := []struct { - name string - cluster *v3clusterpb.Cluster - wantUpdate ClusterUpdate - wantErr bool - }{ - { - name: "non-eds-cluster-type", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - }, - LbPolicy: v3clusterpb.Cluster_LEAST_REQUEST, - }, - wantUpdate: emptyUpdate, - wantErr: true, - }, - { - name: "no-eds-config", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - }, - wantUpdate: 
emptyUpdate, - wantErr: true, - }, - { - name: "no-ads-config-source", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{}, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - }, - wantUpdate: emptyUpdate, - wantErr: true, - }, - { - name: "non-round-robin-lb-policy", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - }, - LbPolicy: v3clusterpb.Cluster_LEAST_REQUEST, - }, - wantUpdate: emptyUpdate, - wantErr: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if update, err := validateCluster(test.cluster); err == nil { - t.Errorf("validateCluster(%+v) = %v, wanted error", test.cluster, update) - } - }) - } -} - -func (s) TestValidateCluster_Success(t *testing.T) { - tests := []struct { - name string - cluster *v3clusterpb.Cluster - wantUpdate ClusterUpdate - }{ - { - name: "happy-case-no-service-name-no-lrs", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - }, - wantUpdate: emptyUpdate, - }, - { - name: "happy-case-no-lrs", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - 
}, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - }, - wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: false}, - }, - { - name: "happiest-case", - cluster: &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - LrsServer: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ - Self: &v3corepb.SelfConfigSource{}, - }, - }, - }, - wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: true}, - }, - { - name: "happiest-case-with-circuitbreakers", - cluster: &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - CircuitBreakers: &v3clusterpb.CircuitBreakers{ - Thresholds: []*v3clusterpb.CircuitBreakers_Thresholds{ - { - Priority: v3corepb.RoutingPriority_DEFAULT, - MaxRequests: wrapperspb.UInt32(512), - }, - { - Priority: v3corepb.RoutingPriority_HIGH, - MaxRequests: nil, - }, - }, - }, - LrsServer: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ - Self: &v3corepb.SelfConfigSource{}, - }, - }, - }, - wantUpdate: ClusterUpdate{ServiceName: serviceName, EnableLRS: true, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, - }, - } - - origCircuitBreakingSupport := env.CircuitBreakingSupport - env.CircuitBreakingSupport = true - defer func() { 
env.CircuitBreakingSupport = origCircuitBreakingSupport }() - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - update, err := validateCluster(test.cluster) - if err != nil { - t.Errorf("validateCluster(%+v) failed: %v", test.cluster, err) - } - if !cmp.Equal(update, test.wantUpdate, cmpopts.EquateEmpty()) { - t.Errorf("validateCluster(%+v) = %v, want: %v", test.cluster, update, test.wantUpdate) - } - }) - } -} - -func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { - // Turn off the env var protection for client-side security. - origClientSideSecurityEnvVar := env.ClientSideSecuritySupport - env.ClientSideSecuritySupport = false - defer func() { env.ClientSideSecuritySupport = origClientSideSecurityEnvVar }() - - cluster := &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "rootInstance", - CertificateName: "rootCert", - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - } - wantUpdate := ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, - } - gotUpdate, err := validateCluster(cluster) - if err != nil { - 
t.Errorf("validateCluster() failed: %v", err) - } - if diff := cmp.Diff(wantUpdate, gotUpdate); diff != "" { - t.Errorf("validateCluster() returned unexpected diff (-want, got):\n%s", diff) - } -} - -func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { - // Turn on the env var protection for client-side security. - origClientSideSecurityEnvVar := env.ClientSideSecuritySupport - env.ClientSideSecuritySupport = true - defer func() { env.ClientSideSecuritySupport = origClientSideSecurityEnvVar }() - - const ( - identityPluginInstance = "identityPluginInstance" - identityCertName = "identityCert" - rootPluginInstance = "rootPluginInstance" - rootCertName = "rootCert" - serviceName = "service" - sanExact = "san-exact" - sanPrefix = "san-prefix" - sanSuffix = "san-suffix" - sanRegexBad = "??" - sanRegexGood = "san?regex?" - sanContains = "san-contains" - ) - var sanRE = regexp.MustCompile(sanRegexGood) - - tests := []struct { - name string - cluster *v3clusterpb.Cluster - wantUpdate ClusterUpdate - wantErr bool - }{ - { - name: "transport-socket-unsupported-name", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - Name: "unsupported-foo", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - }, - }, - }, - }, - wantErr: true, - }, - { - name: "transport-socket-unsupported-typeURL", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - 
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3HTTPConnManagerURL, - }, - }, - }, - }, - wantErr: true, - }, - { - name: "transport-socket-unsupported-type", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - }, - }, - wantErr: true, - }, - { - name: "transport-socket-unsupported-validation-context", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ - ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ - Name: "foo-sds-secret", - }, - }, - 
}, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - wantErr: true, - }, - { - name: "transport-socket-without-validation-context", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{}, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - wantErr: true, - }, - { - name: "empty-prefix-in-matching-SAN", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ - CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ - DefaultValidationContext: &v3tlspb.CertificateValidationContext{ - MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ - {MatchPattern: 
&v3matcherpb.StringMatcher_Prefix{Prefix: ""}}, - }, - }, - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - wantErr: true, - }, - { - name: "empty-suffix-in-matching-SAN", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ - CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ - DefaultValidationContext: &v3tlspb.CertificateValidationContext{ - MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ - {MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: ""}}, - }, - }, - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - wantErr: true, - }, - { - name: "empty-contains-in-matching-SAN", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - 
ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ - CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ - DefaultValidationContext: &v3tlspb.CertificateValidationContext{ - MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ - {MatchPattern: &v3matcherpb.StringMatcher_Contains{Contains: ""}}, - }, - }, - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - wantErr: true, - }, - { - name: "invalid-regex-in-matching-SAN", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ - CombinedValidationContext: 
&v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ - DefaultValidationContext: &v3tlspb.CertificateValidationContext{ - MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ - {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexBad}}}, - }, - }, - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - wantErr: true, - }, - { - name: "happy-case-with-no-identity-certs", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - wantUpdate: ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, - SecurityCfg: &SecurityConfig{ - RootInstanceName: rootPluginInstance, - RootCertName: rootCertName, - }, - }, - }, - { - name: "happy-case-with-validation-context-provider-instance", - cluster: 
&v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: identityPluginInstance, - CertificateName: identityCertName, - }, - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - wantUpdate: ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, - SecurityCfg: &SecurityConfig{ - RootInstanceName: rootPluginInstance, - RootCertName: rootCertName, - IdentityInstanceName: identityPluginInstance, - IdentityCertName: identityCertName, - }, - }, - }, - { - name: "happy-case-with-combined-validation-context", - cluster: &v3clusterpb.Cluster{ - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - TransportSocket: 
&v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.UpstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: identityPluginInstance, - CertificateName: identityCertName, - }, - ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ - CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ - DefaultValidationContext: &v3tlspb.CertificateValidationContext{ - MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ - { - MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: sanExact}, - IgnoreCase: true, - }, - {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: sanPrefix}}, - {MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: sanSuffix}}, - {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexGood}}}, - {MatchPattern: &v3matcherpb.StringMatcher_Contains{Contains: sanContains}}, - }, - }, - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: rootPluginInstance, - CertificateName: rootCertName, - }, - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - wantUpdate: ClusterUpdate{ - ServiceName: serviceName, - EnableLRS: false, - SecurityCfg: &SecurityConfig{ - RootInstanceName: rootPluginInstance, - RootCertName: rootCertName, - IdentityInstanceName: identityPluginInstance, - IdentityCertName: identityCertName, - SubjectAltNameMatchers: []xdsinternal.StringMatcher{ - xdsinternal.StringMatcherForTesting(newStringP(sanExact), nil, nil, nil, nil, true), - xdsinternal.StringMatcherForTesting(nil, newStringP(sanPrefix), nil, nil, nil, false), - 
xdsinternal.StringMatcherForTesting(nil, nil, newStringP(sanSuffix), nil, nil, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, nil, sanRE, false), - xdsinternal.StringMatcherForTesting(nil, nil, nil, newStringP(sanContains), nil, false), - }, - }, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - update, err := validateCluster(test.cluster) - if (err != nil) != test.wantErr { - t.Errorf("validateCluster() returned err %v wantErr %v)", err, test.wantErr) - } - if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmp.AllowUnexported(regexp.Regexp{})); diff != "" { - t.Errorf("validateCluster() returned unexpected diff (-want, +got):\n%s", diff) - } - }) - } -} - -func (s) TestUnmarshalCluster(t *testing.T) { - const ( - v2ClusterName = "v2clusterName" - v3ClusterName = "v3clusterName" - v2Service = "v2Service" - v3Service = "v2Service" - ) - var ( - v2Cluster = &v2xdspb.Cluster{ - Name: v2ClusterName, - ClusterDiscoveryType: &v2xdspb.Cluster_Type{Type: v2xdspb.Cluster_EDS}, - EdsClusterConfig: &v2xdspb.Cluster_EdsClusterConfig{ - EdsConfig: &v2corepb.ConfigSource{ - ConfigSourceSpecifier: &v2corepb.ConfigSource_Ads{ - Ads: &v2corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: v2Service, - }, - LbPolicy: v2xdspb.Cluster_ROUND_ROBIN, - LrsServer: &v2corepb.ConfigSource{ - ConfigSourceSpecifier: &v2corepb.ConfigSource_Self{ - Self: &v2corepb.SelfConfigSource{}, - }, - }, - } - v2ClusterAny = &anypb.Any{ - TypeUrl: version.V2ClusterURL, - Value: func() []byte { - mcl, _ := proto.Marshal(v2Cluster) - return mcl - }(), - } - - v3Cluster = &v3clusterpb.Cluster{ - Name: v3ClusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: v3Service, - }, - 
LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - LrsServer: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ - Self: &v3corepb.SelfConfigSource{}, - }, - }, - } - v3ClusterAny = &anypb.Any{ - TypeUrl: version.V3ClusterURL, - Value: func() []byte { - mcl, _ := proto.Marshal(v3Cluster) - return mcl - }(), - } - ) - const testVersion = "test-version-cds" - - tests := []struct { - name string - resources []*anypb.Any - wantUpdate map[string]ClusterUpdate - wantMD UpdateMetadata - wantErr bool - }{ - { - name: "non-cluster resource type", - resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "badly marshaled cluster resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ClusterURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "bad cluster resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ClusterURL, - Value: func() []byte { - cl := &v3clusterpb.Cluster{ - Name: "test", - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, - } - mcl, _ := proto.Marshal(cl) - return mcl - }(), - }, - }, - wantUpdate: map[string]ClusterUpdate{"test": {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "v2 cluster", - resources: []*anypb.Any{v2ClusterAny}, - wantUpdate: map[string]ClusterUpdate{ - v2ClusterName: { - ServiceName: v2Service, EnableLRS: true, - Raw: v2ClusterAny, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - 
Version: testVersion, - }, - }, - { - name: "v3 cluster", - resources: []*anypb.Any{v3ClusterAny}, - wantUpdate: map[string]ClusterUpdate{ - v3ClusterName: { - ServiceName: v3Service, EnableLRS: true, - Raw: v3ClusterAny, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "multiple clusters", - resources: []*anypb.Any{v2ClusterAny, v3ClusterAny}, - wantUpdate: map[string]ClusterUpdate{ - v2ClusterName: { - ServiceName: v2Service, EnableLRS: true, - Raw: v2ClusterAny, - }, - v3ClusterName: { - ServiceName: v3Service, EnableLRS: true, - Raw: v3ClusterAny, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - // To test that unmarshal keeps processing on errors. - name: "good and bad clusters", - resources: []*anypb.Any{ - v2ClusterAny, - { - // bad cluster resource - TypeUrl: version.V3ClusterURL, - Value: func() []byte { - cl := &v3clusterpb.Cluster{ - Name: "bad", - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, - } - mcl, _ := proto.Marshal(cl) - return mcl - }(), - }, - v3ClusterAny, - }, - wantUpdate: map[string]ClusterUpdate{ - v2ClusterName: { - ServiceName: v2Service, EnableLRS: true, - Raw: v2ClusterAny, - }, - v3ClusterName: { - ServiceName: v3Service, EnableLRS: true, - Raw: v3ClusterAny, - }, - "bad": {}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - update, md, err := UnmarshalCluster(testVersion, test.resources, nil) - if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalCluster(), got err: %v, wantErr: %v", err, test.wantErr) - } - if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { - t.Errorf("got unexpected update, diff (-got +want): %v", diff) - } - if diff := 
cmp.Diff(md, test.wantMD, cmpOptsIgnoreDetails); diff != "" { - t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) - } - }) - } -} diff --git a/xds/internal/client/client.go b/xds/internal/client/client.go deleted file mode 100644 index 5c0f38a9782f..000000000000 --- a/xds/internal/client/client.go +++ /dev/null @@ -1,698 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package client implements a full fledged gRPC client for the xDS API used by -// the xds resolver and balancer implementations. 
-package client - -import ( - "context" - "errors" - "fmt" - "net" - "sync" - "time" - - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - - "google.golang.org/grpc/internal/xds" - "google.golang.org/grpc/xds/internal/client/load" - "google.golang.org/grpc/xds/internal/httpfilter" - - "google.golang.org/grpc" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/keepalive" - "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/client/bootstrap" - "google.golang.org/grpc/xds/internal/version" -) - -var ( - m = make(map[version.TransportAPI]APIClientBuilder) -) - -// RegisterAPIClientBuilder registers a client builder for xDS transport protocol -// version specified by b.Version(). -// -// NOTE: this function must only be called during initialization time (i.e. in -// an init() function), and is not thread-safe. If multiple builders are -// registered for the same version, the one registered last will take effect. -func RegisterAPIClientBuilder(b APIClientBuilder) { - m[b.Version()] = b -} - -// getAPIClientBuilder returns the client builder registered for the provided -// xDS transport API version. -func getAPIClientBuilder(version version.TransportAPI) APIClientBuilder { - if b, ok := m[version]; ok { - return b - } - return nil -} - -// BuildOptions contains options to be passed to client builders. -type BuildOptions struct { - // Parent is a top-level xDS client which has the intelligence to take - // appropriate action based on xDS responses received from the management - // server. - Parent UpdateHandler - // NodeProto contains the Node proto to be used in xDS requests. 
The actual - // type depends on the transport protocol version used. - NodeProto proto.Message - // Backoff returns the amount of time to backoff before retrying broken - // streams. - Backoff func(int) time.Duration - // Logger provides enhanced logging capabilities. - Logger *grpclog.PrefixLogger -} - -// APIClientBuilder creates an xDS client for a specific xDS transport protocol -// version. -type APIClientBuilder interface { - // Build builds a transport protocol specific implementation of the xDS - // client based on the provided clientConn to the management server and the - // provided options. - Build(*grpc.ClientConn, BuildOptions) (APIClient, error) - // Version returns the xDS transport protocol version used by clients build - // using this builder. - Version() version.TransportAPI -} - -// APIClient represents the functionality provided by transport protocol -// version specific implementations of the xDS client. -// -// TODO: unexport this interface and all the methods after the PR to make -// xdsClient sharable by clients. AddWatch and RemoveWatch are exported for -// v2/v3 to override because they need to keep track of LDS name for RDS to use. -// After the share xdsClient change, that's no longer necessary. After that, we -// will still keep this interface for testing purposes. -type APIClient interface { - // AddWatch adds a watch for an xDS resource given its type and name. - AddWatch(ResourceType, string) - - // RemoveWatch cancels an already registered watch for an xDS resource - // given its type and name. - RemoveWatch(ResourceType, string) - - // reportLoad starts an LRS stream to periodically report load using the - // provided ClientConn, which represent a connection to the management - // server. - reportLoad(ctx context.Context, cc *grpc.ClientConn, opts loadReportingOptions) - - // Close cleans up resources allocated by the API client. - Close() -} - -// loadReportingOptions contains configuration knobs for reporting load data. 
-type loadReportingOptions struct { - loadStore *load.Store -} - -// UpdateHandler receives and processes (by taking appropriate actions) xDS -// resource updates from an APIClient for a specific version. -type UpdateHandler interface { - // NewListeners handles updates to xDS listener resources. - NewListeners(map[string]ListenerUpdate, UpdateMetadata) - // NewRouteConfigs handles updates to xDS RouteConfiguration resources. - NewRouteConfigs(map[string]RouteConfigUpdate, UpdateMetadata) - // NewClusters handles updates to xDS Cluster resources. - NewClusters(map[string]ClusterUpdate, UpdateMetadata) - // NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely - // referred to as Endpoints) resources. - NewEndpoints(map[string]EndpointsUpdate, UpdateMetadata) -} - -// ServiceStatus is the status of the update. -type ServiceStatus int - -const ( - // ServiceStatusUnknown is the default state, before a watch is started for - // the resource. - ServiceStatusUnknown ServiceStatus = iota - // ServiceStatusRequested is when the watch is started, but before and - // response is received. - ServiceStatusRequested - // ServiceStatusNotExist is when the resource doesn't exist in - // state-of-the-world responses (e.g. LDS and CDS), which means the resource - // is removed by the management server. - ServiceStatusNotExist // Resource is removed in the server, in LDS/CDS. - // ServiceStatusACKed is when the resource is ACKed. - ServiceStatusACKed - // ServiceStatusNACKed is when the resource is NACKed. - ServiceStatusNACKed -) - -// UpdateErrorMetadata is part of UpdateMetadata. It contains the error state -// when a response is NACKed. -type UpdateErrorMetadata struct { - // Version is the version of the NACKed response. - Version string - // Err contains why the response was NACKed. - Err error - // Timestamp is when the NACKed response was received. 
- Timestamp time.Time -} - -// UpdateMetadata contains the metadata for each update, including timestamp, -// raw message, and so on. -type UpdateMetadata struct { - // Status is the status of this resource, e.g. ACKed, NACKed, or - // Not_exist(removed). - Status ServiceStatus - // Version is the version of the xds response. Note that this is the version - // of the resource in use (previous ACKed). If a response is NACKed, the - // NACKed version is in ErrState. - Version string - // Timestamp is when the response is received. - Timestamp time.Time - // ErrState is set when the update is NACKed. - ErrState *UpdateErrorMetadata -} - -// ListenerUpdate contains information received in an LDS response, which is of -// interest to the registered LDS watcher. -type ListenerUpdate struct { - // RouteConfigName is the route configuration name corresponding to the - // target which is being watched through LDS. - RouteConfigName string - // MaxStreamDuration contains the HTTP connection manager's - // common_http_protocol_options.max_stream_duration field, or zero if - // unset. - MaxStreamDuration time.Duration - // HTTPFilters is a list of HTTP filters (name, config) from the LDS - // response. - HTTPFilters []HTTPFilter - // InboundListenerCfg contains inbound listener configuration. - InboundListenerCfg *InboundListenerConfig - - // Raw is the resource from the xds response. - Raw *anypb.Any -} - -// HTTPFilter represents one HTTP filter from an LDS response's HTTP connection -// manager field. -type HTTPFilter struct { - // Name is an arbitrary name of the filter. Used for applying override - // settings in virtual host / route / weighted cluster configuration (not - // yet supported). - Name string - // Filter is the HTTP filter found in the registry for the config type. 
- Filter httpfilter.Filter - // Config contains the filter's configuration - Config httpfilter.FilterConfig -} - -// InboundListenerConfig contains information about the inbound listener, i.e -// the server-side listener. -type InboundListenerConfig struct { - // Address is the local address on which the inbound listener is expected to - // accept incoming connections. - Address string - // Port is the local port on which the inbound listener is expected to - // accept incoming connections. - Port string - // FilterChains is the list of filter chains associated with this listener. - FilterChains []*FilterChain - // DefaultFilterChain is the filter chain to be used when none of the above - // filter chains matches an incoming connection. - DefaultFilterChain *FilterChain -} - -// FilterChain wraps a set of match criteria and associated security -// configuration. -// -// The actual set filters associated with this filter chain are not captured -// here, since we do not support these filters on the server yet. -type FilterChain struct { - // Match contains the criteria to use when matching a connection to this - // filter chain. - Match *FilterChainMatch - // SecurityCfg contains transport socket security configuration. - SecurityCfg *SecurityConfig -} - -// SourceType specifies the connection source IP match type. -type SourceType int - -const ( - // SourceTypeAny matches connection attempts from any source. - SourceTypeAny SourceType = iota - // SourceTypeSameOrLoopback matches connection attempts from the same host. - SourceTypeSameOrLoopback - // SourceTypeExternal matches connection attempts from a different host. - SourceTypeExternal -) - -// FilterChainMatch specifies the match criteria for selecting a specific filter -// chain of a listener, for an incoming connection. -// -// The xDS FilterChainMatch proto specifies 8 match criteria. 
But we only have a -// subset of those fields here because we explicitly ignore filter chains whose -// match criteria specifies values for fields like destination_port, -// server_names, application_protocols, transport_protocol. -type FilterChainMatch struct { - // DestPrefixRanges specifies a set of IP addresses and prefix lengths to - // match the destination address of the incoming connection when the - // listener is bound to 0.0.0.0/[::]. If this field is empty, the - // destination address is ignored. - DestPrefixRanges []net.IP - // SourceType specifies the connection source IP match type. Can be any, - // local or external network. - SourceType SourceType - // SourcePrefixRanges specifies a set of IP addresses and prefix lengths to - // match the source address of the incoming connection. If this field is - // empty, the source address is ignored. - SourcePrefixRanges []net.IP - // SourcePorts specifies a set of ports to match the source port of the - // incoming connection. If this field is empty, the source port is ignored. - SourcePorts []uint32 -} - -// RouteConfigUpdate contains information received in an RDS response, which is -// of interest to the registered RDS watcher. -type RouteConfigUpdate struct { - VirtualHosts []*VirtualHost - - // Raw is the resource from the xds response. - Raw *anypb.Any -} - -// VirtualHost contains the routes for a list of Domains. -// -// Note that the domains in this slice can be a wildcard, not an exact string. -// The consumer of this struct needs to find the best match for its hostname. -type VirtualHost struct { - Domains []string - // Routes contains a list of routes, each containing matchers and - // corresponding action. - Routes []*Route - // HTTPFilterConfigOverride contains any HTTP filter config overrides for - // the virtual host which may be present. An individual filter's override - // may be unused if the matching Route contains an override for that - // filter. 
- HTTPFilterConfigOverride map[string]httpfilter.FilterConfig -} - -// Route is both a specification of how to match a request as well as an -// indication of the action to take upon match. -type Route struct { - Path, Prefix, Regex *string - // Indicates if prefix/path matching should be case insensitive. The default - // is false (case sensitive). - CaseInsensitive bool - Headers []*HeaderMatcher - Fraction *uint32 - - // If the matchers above indicate a match, the below configuration is used. - WeightedClusters map[string]WeightedCluster - // If MaxStreamDuration is nil, it indicates neither of the route action's - // max_stream_duration fields (grpc_timeout_header_max nor - // max_stream_duration) were set. In this case, the ListenerUpdate's - // MaxStreamDuration field should be used. If MaxStreamDuration is set to - // an explicit zero duration, the application's deadline should be used. - MaxStreamDuration *time.Duration - // HTTPFilterConfigOverride contains any HTTP filter config overrides for - // the route which may be present. An individual filter's override may be - // unused if the matching WeightedCluster contains an override for that - // filter. - HTTPFilterConfigOverride map[string]httpfilter.FilterConfig -} - -// WeightedCluster contains settings for an xds RouteAction.WeightedCluster. -type WeightedCluster struct { - // Weight is the relative weight of the cluster. It will never be zero. - Weight uint32 - // HTTPFilterConfigOverride contains any HTTP filter config overrides for - // the weighted cluster which may be present. - HTTPFilterConfigOverride map[string]httpfilter.FilterConfig -} - -// HeaderMatcher represents header matchers. 
-type HeaderMatcher struct { - Name string `json:"name"` - InvertMatch *bool `json:"invertMatch,omitempty"` - ExactMatch *string `json:"exactMatch,omitempty"` - RegexMatch *string `json:"regexMatch,omitempty"` - PrefixMatch *string `json:"prefixMatch,omitempty"` - SuffixMatch *string `json:"suffixMatch,omitempty"` - RangeMatch *Int64Range `json:"rangeMatch,omitempty"` - PresentMatch *bool `json:"presentMatch,omitempty"` -} - -// Int64Range is a range for header range match. -type Int64Range struct { - Start int64 `json:"start"` - End int64 `json:"end"` -} - -// SecurityConfig contains the security configuration received as part of the -// Cluster resource on the client-side, and as part of the Listener resource on -// the server-side. -type SecurityConfig struct { - // RootInstanceName identifies the certProvider plugin to be used to fetch - // root certificates. This instance name will be resolved to the plugin name - // and its associated configuration from the certificate_providers field of - // the bootstrap file. - RootInstanceName string - // RootCertName is the certificate name to be passed to the plugin (looked - // up from the bootstrap file) while fetching root certificates. - RootCertName string - // IdentityInstanceName identifies the certProvider plugin to be used to - // fetch identity certificates. This instance name will be resolved to the - // plugin name and its associated configuration from the - // certificate_providers field of the bootstrap file. - IdentityInstanceName string - // IdentityCertName is the certificate name to be passed to the plugin - // (looked up from the bootstrap file) while fetching identity certificates. - IdentityCertName string - // SubjectAltNameMatchers is an optional list of match criteria for SANs - // specified on the peer certificate. Used only on the client-side. - // - // Some intricacies: - // - If this field is empty, then any peer certificate is accepted. 
- // - If the peer certificate contains a wildcard DNS SAN, and an `exact` - // matcher is configured, a wildcard DNS match is performed instead of a - // regular string comparison. - SubjectAltNameMatchers []xds.StringMatcher - // RequireClientCert indicates if the server handshake process expects the - // client to present a certificate. Set to true when performing mTLS. Used - // only on the server-side. - RequireClientCert bool -} - -// ClusterUpdate contains information from a received CDS response, which is of -// interest to the registered CDS watcher. -type ClusterUpdate struct { - // ServiceName is the service name corresponding to the clusterName which - // is being watched for through CDS. - ServiceName string - // EnableLRS indicates whether or not load should be reported through LRS. - EnableLRS bool - // SecurityCfg contains security configuration sent by the control plane. - SecurityCfg *SecurityConfig - // MaxRequests for circuit breaking, if any (otherwise nil). - MaxRequests *uint32 - - // Raw is the resource from the xds response. - Raw *anypb.Any -} - -// OverloadDropConfig contains the config to drop overloads. -type OverloadDropConfig struct { - Category string - Numerator uint32 - Denominator uint32 -} - -// EndpointHealthStatus represents the health status of an endpoint. -type EndpointHealthStatus int32 - -const ( - // EndpointHealthStatusUnknown represents HealthStatus UNKNOWN. - EndpointHealthStatusUnknown EndpointHealthStatus = iota - // EndpointHealthStatusHealthy represents HealthStatus HEALTHY. - EndpointHealthStatusHealthy - // EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY. - EndpointHealthStatusUnhealthy - // EndpointHealthStatusDraining represents HealthStatus DRAINING. - EndpointHealthStatusDraining - // EndpointHealthStatusTimeout represents HealthStatus TIMEOUT. - EndpointHealthStatusTimeout - // EndpointHealthStatusDegraded represents HealthStatus DEGRADED. 
- EndpointHealthStatusDegraded -) - -// Endpoint contains information of an endpoint. -type Endpoint struct { - Address string - HealthStatus EndpointHealthStatus - Weight uint32 -} - -// Locality contains information of a locality. -type Locality struct { - Endpoints []Endpoint - ID internal.LocalityID - Priority uint32 - Weight uint32 -} - -// EndpointsUpdate contains an EDS update. -type EndpointsUpdate struct { - Drops []OverloadDropConfig - Localities []Locality - - // Raw is the resource from the xds response. - Raw *anypb.Any -} - -// Function to be overridden in tests. -var newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, opts BuildOptions) (APIClient, error) { - cb := getAPIClientBuilder(apiVersion) - if cb == nil { - return nil, fmt.Errorf("no client builder for xDS API version: %v", apiVersion) - } - return cb.Build(cc, opts) -} - -// clientImpl is the real implementation of the xds client. The exported Client -// is a wrapper of this struct with a ref count. -// -// Implements UpdateHandler interface. -// TODO(easwars): Make a wrapper struct which implements this interface in the -// style of ccBalancerWrapper so that the Client type does not implement these -// exported methods. -type clientImpl struct { - done *grpcsync.Event - config *bootstrap.Config - cc *grpc.ClientConn // Connection to the management server. - apiClient APIClient - watchExpiryTimeout time.Duration - - logger *grpclog.PrefixLogger - - updateCh *buffer.Unbounded // chan *watcherInfoWithUpdate - // All the following maps are to keep the updates/metadata in a cache. - // TODO: move them to a separate struct/package, to cleanup the xds_client. - // And CSDS handler can be implemented directly by the cache. - mu sync.Mutex - ldsWatchers map[string]map[*watchInfo]bool - ldsVersion string // Only used in CSDS. 
- ldsCache map[string]ListenerUpdate - ldsMD map[string]UpdateMetadata - rdsWatchers map[string]map[*watchInfo]bool - rdsVersion string // Only used in CSDS. - rdsCache map[string]RouteConfigUpdate - rdsMD map[string]UpdateMetadata - cdsWatchers map[string]map[*watchInfo]bool - cdsVersion string // Only used in CSDS. - cdsCache map[string]ClusterUpdate - cdsMD map[string]UpdateMetadata - edsWatchers map[string]map[*watchInfo]bool - edsVersion string // Only used in CSDS. - edsCache map[string]EndpointsUpdate - edsMD map[string]UpdateMetadata - - // Changes to map lrsClients and the lrsClient inside the map need to be - // protected by lrsMu. - lrsMu sync.Mutex - lrsClients map[string]*lrsClient -} - -// newWithConfig returns a new xdsClient with the given config. -func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) (*clientImpl, error) { - switch { - case config.BalancerName == "": - return nil, errors.New("xds: no xds_server name provided in options") - case config.Creds == nil: - return nil, errors.New("xds: no credentials provided in options") - case config.NodeProto == nil: - return nil, errors.New("xds: no node_proto provided in options") - } - - switch config.TransportAPI { - case version.TransportV2: - if _, ok := config.NodeProto.(*v2corepb.Node); !ok { - return nil, fmt.Errorf("xds: Node proto type (%T) does not match API version: %v", config.NodeProto, config.TransportAPI) - } - case version.TransportV3: - if _, ok := config.NodeProto.(*v3corepb.Node); !ok { - return nil, fmt.Errorf("xds: Node proto type (%T) does not match API version: %v", config.NodeProto, config.TransportAPI) - } - } - - dopts := []grpc.DialOption{ - config.Creds, - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: 5 * time.Minute, - Timeout: 20 * time.Second, - }), - } - - c := &clientImpl{ - done: grpcsync.NewEvent(), - config: config, - watchExpiryTimeout: watchExpiryTimeout, - - updateCh: buffer.NewUnbounded(), - ldsWatchers: 
make(map[string]map[*watchInfo]bool), - ldsCache: make(map[string]ListenerUpdate), - ldsMD: make(map[string]UpdateMetadata), - rdsWatchers: make(map[string]map[*watchInfo]bool), - rdsCache: make(map[string]RouteConfigUpdate), - rdsMD: make(map[string]UpdateMetadata), - cdsWatchers: make(map[string]map[*watchInfo]bool), - cdsCache: make(map[string]ClusterUpdate), - cdsMD: make(map[string]UpdateMetadata), - edsWatchers: make(map[string]map[*watchInfo]bool), - edsCache: make(map[string]EndpointsUpdate), - edsMD: make(map[string]UpdateMetadata), - lrsClients: make(map[string]*lrsClient), - } - - cc, err := grpc.Dial(config.BalancerName, dopts...) - if err != nil { - // An error from a non-blocking dial indicates something serious. - return nil, fmt.Errorf("xds: failed to dial balancer {%s}: %v", config.BalancerName, err) - } - c.cc = cc - c.logger = prefixLogger((c)) - c.logger.Infof("Created ClientConn to xDS management server: %s", config.BalancerName) - - apiClient, err := newAPIClient(config.TransportAPI, cc, BuildOptions{ - Parent: c, - NodeProto: config.NodeProto, - Backoff: backoff.DefaultExponential.Backoff, - Logger: c.logger, - }) - if err != nil { - return nil, err - } - c.apiClient = apiClient - c.logger.Infof("Created") - go c.run() - return c, nil -} - -// BootstrapConfig returns the configuration read from the bootstrap file. -// Callers must treat the return value as read-only. -func (c *Client) BootstrapConfig() *bootstrap.Config { - return c.config -} - -// run is a goroutine for all the callbacks. -// -// Callback can be called in watch(), if an item is found in cache. Without this -// goroutine, the callback will be called inline, which might cause a deadlock -// in user's code. Callbacks also cannot be simple `go callback()` because the -// order matters. 
-func (c *clientImpl) run() { - for { - select { - case t := <-c.updateCh.Get(): - c.updateCh.Load() - if c.done.HasFired() { - return - } - c.callCallback(t.(*watcherInfoWithUpdate)) - case <-c.done.Done(): - return - } - } -} - -// Close closes the gRPC connection to the management server. -func (c *clientImpl) Close() { - if c.done.HasFired() { - return - } - c.done.Fire() - // TODO: Should we invoke the registered callbacks here with an error that - // the client is closed? - c.apiClient.Close() - c.cc.Close() - c.logger.Infof("Shutdown") -} - -// ResourceType identifies resources in a transport protocol agnostic way. These -// will be used in transport version agnostic code, while the versioned API -// clients will map these to appropriate version URLs. -type ResourceType int - -// Version agnostic resource type constants. -const ( - UnknownResource ResourceType = iota - ListenerResource - HTTPConnManagerResource - RouteConfigResource - ClusterResource - EndpointsResource -) - -func (r ResourceType) String() string { - switch r { - case ListenerResource: - return "ListenerResource" - case HTTPConnManagerResource: - return "HTTPConnManagerResource" - case RouteConfigResource: - return "RouteConfigResource" - case ClusterResource: - return "ClusterResource" - case EndpointsResource: - return "EndpointsResource" - default: - return "UnknownResource" - } -} - -// IsListenerResource returns true if the provider URL corresponds to an xDS -// Listener resource. -func IsListenerResource(url string) bool { - return url == version.V2ListenerURL || url == version.V3ListenerURL -} - -// IsHTTPConnManagerResource returns true if the provider URL corresponds to an xDS -// HTTPConnManager resource. -func IsHTTPConnManagerResource(url string) bool { - return url == version.V2HTTPConnManagerURL || url == version.V3HTTPConnManagerURL -} - -// IsRouteConfigResource returns true if the provider URL corresponds to an xDS -// RouteConfig resource. 
-func IsRouteConfigResource(url string) bool { - return url == version.V2RouteConfigURL || url == version.V3RouteConfigURL -} - -// IsClusterResource returns true if the provider URL corresponds to an xDS -// Cluster resource. -func IsClusterResource(url string) bool { - return url == version.V2ClusterURL || url == version.V3ClusterURL -} - -// IsEndpointsResource returns true if the provider URL corresponds to an xDS -// Endpoints resource. -func IsEndpointsResource(url string) bool { - return url == version.V2EndpointsURL || url == version.V3EndpointsURL -} diff --git a/xds/internal/client/client_test.go b/xds/internal/client/client_test.go deleted file mode 100644 index 8275ea60e0dc..000000000000 --- a/xds/internal/client/client_test.go +++ /dev/null @@ -1,346 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package client - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal/client/bootstrap" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/version" - "google.golang.org/protobuf/testing/protocmp" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -const ( - testXDSServer = "xds-server" - - testLDSName = "test-lds" - testRDSName = "test-rds" - testCDSName = "test-cds" - testEDSName = "test-eds" - - defaultTestWatchExpiryTimeout = 500 * time.Millisecond - defaultTestTimeout = 5 * time.Second - defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. -) - -var ( - cmpOpts = cmp.Options{ - cmpopts.EquateEmpty(), - cmp.Comparer(func(a, b time.Time) bool { return true }), - cmp.Comparer(func(x, y error) bool { - if x == nil || y == nil { - return x == nil && y == nil - } - return x.Error() == y.Error() - }), - protocmp.Transform(), - } - - // When comparing NACK UpdateMetadata, we only care if error is nil, but not - // the details in error. 
- errPlaceHolder = fmt.Errorf("error whose details don't matter") - cmpOptsIgnoreDetails = cmp.Options{ - cmp.Comparer(func(a, b time.Time) bool { return true }), - cmp.Comparer(func(x, y error) bool { - return (x == nil) == (y == nil) - }), - } -) - -func clientOpts(balancerName string, overrideWatchExpiryTimeout bool) (*bootstrap.Config, time.Duration) { - watchExpiryTimeout := defaultWatchExpiryTimeout - if overrideWatchExpiryTimeout { - watchExpiryTimeout = defaultTestWatchExpiryTimeout - } - return &bootstrap.Config{ - BalancerName: balancerName, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, watchExpiryTimeout -} - -type testAPIClient struct { - done *grpcsync.Event - addWatches map[ResourceType]*testutils.Channel - removeWatches map[ResourceType]*testutils.Channel -} - -func overrideNewAPIClient() (*testutils.Channel, func()) { - origNewAPIClient := newAPIClient - ch := testutils.NewChannel() - newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, opts BuildOptions) (APIClient, error) { - ret := newTestAPIClient() - ch.Send(ret) - return ret, nil - } - return ch, func() { newAPIClient = origNewAPIClient } -} - -func newTestAPIClient() *testAPIClient { - addWatches := map[ResourceType]*testutils.Channel{ - ListenerResource: testutils.NewChannel(), - RouteConfigResource: testutils.NewChannel(), - ClusterResource: testutils.NewChannel(), - EndpointsResource: testutils.NewChannel(), - } - removeWatches := map[ResourceType]*testutils.Channel{ - ListenerResource: testutils.NewChannel(), - RouteConfigResource: testutils.NewChannel(), - ClusterResource: testutils.NewChannel(), - EndpointsResource: testutils.NewChannel(), - } - return &testAPIClient{ - done: grpcsync.NewEvent(), - addWatches: addWatches, - removeWatches: removeWatches, - } -} - -func (c *testAPIClient) AddWatch(resourceType ResourceType, resourceName string) { - c.addWatches[resourceType].Send(resourceName) -} - 
-func (c *testAPIClient) RemoveWatch(resourceType ResourceType, resourceName string) { - c.removeWatches[resourceType].Send(resourceName) -} - -func (c *testAPIClient) reportLoad(context.Context, *grpc.ClientConn, loadReportingOptions) { -} - -func (c *testAPIClient) Close() { - c.done.Fire() -} - -// TestWatchCallAnotherWatch covers the case where watch() is called inline by a -// callback. It makes sure it doesn't cause a deadlock. -func (s) TestWatchCallAnotherWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - clusterUpdateCh := testutils.NewChannel() - firstTime := true - client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) - // Calls another watch inline, to ensure there's deadlock. 
- client.WatchCluster("another-random-name", func(ClusterUpdate, error) {}) - - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); firstTime && err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - firstTime = false - }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := ClusterUpdate{ServiceName: testEDSName} - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate2}, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate2); err != nil { - t.Fatal(err) - } -} - -func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate ListenerUpdate) error { - u, err := updateCh.Receive(ctx) - if err != nil { - return fmt.Errorf("timeout when waiting for listener update: %v", err) - } - gotUpdate := u.(ldsUpdateErr) - if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate) { - return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) - } - return nil -} - -func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate RouteConfigUpdate) error { - u, err := updateCh.Receive(ctx) - if err != nil { - return fmt.Errorf("timeout when waiting for route configuration update: %v", err) - } - gotUpdate := u.(rdsUpdateErr) - if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate) { - return fmt.Errorf("unexpected route config update: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) - } - return nil -} - -func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate ClusterUpdate) error { - u, 
err := updateCh.Receive(ctx) - if err != nil { - return fmt.Errorf("timeout when waiting for cluster update: %v", err) - } - gotUpdate := u.(clusterUpdateErr) - if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate) { - return fmt.Errorf("unexpected clusterUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) - } - return nil -} - -func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate EndpointsUpdate) error { - u, err := updateCh.Receive(ctx) - if err != nil { - return fmt.Errorf("timeout when waiting for endpoints update: %v", err) - } - gotUpdate := u.(endpointsUpdateErr) - if gotUpdate.err != nil || !cmp.Equal(gotUpdate.u, wantUpdate, cmpopts.EquateEmpty()) { - return fmt.Errorf("unexpected endpointsUpdate: (%v, %v), want: (%v, nil)", gotUpdate.u, gotUpdate.err, wantUpdate) - } - return nil -} - -// Test that multiple New() returns the same Client. And only when the last -// client is closed, the underlying client is closed. -func (s) TestClientNewSingleton(t *testing.T) { - oldBootstrapNewConfig := bootstrapNewConfig - bootstrapNewConfig = func() (*bootstrap.Config, error) { - return &bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithInsecure(), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, nil - } - defer func() { bootstrapNewConfig = oldBootstrapNewConfig }() - - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - // The first New(). Should create a Client and a new APIClient. - client, err := New() - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - clientImpl := client.clientImpl - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - // Call New() again. 
They should all return the same client implementation, - // and should not create new API client. - const count = 9 - for i := 0; i < count; i++ { - tc, terr := New() - if terr != nil { - client.Close() - t.Fatalf("%d-th call to New() failed with error: %v", i, terr) - } - if tc.clientImpl != clientImpl { - client.Close() - tc.Close() - t.Fatalf("%d-th call to New() got a different client %p, want %p", i, tc.clientImpl, clientImpl) - } - - sctx, scancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer scancel() - _, err := apiClientCh.Receive(sctx) - if err == nil { - client.Close() - t.Fatalf("%d-th call to New() created a new API client", i) - } - } - - // Call Close(). Nothing should be actually closed until the last ref calls - // Close(). - for i := 0; i < count; i++ { - client.Close() - if clientImpl.done.HasFired() { - t.Fatalf("%d-th call to Close(), unexpected done in the client implemenation", i) - } - if apiClient.done.HasFired() { - t.Fatalf("%d-th call to Close(), unexpected done in the API client", i) - } - } - - // Call the last Close(). The underlying implementation and API Client - // should all be closed. - client.Close() - if !clientImpl.done.HasFired() { - t.Fatalf("want client implementation to be closed, got not done") - } - if !apiClient.done.HasFired() { - t.Fatalf("want API client to be closed, got not done") - } - - // Call New() again after the previous Client is actually closed. Should - // create a Client and a new APIClient. - client2, err2 := New() - if err2 != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client2.Close() - c2, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient2 := c2.(*testAPIClient) - - // The client wrapper with ref count should be the same. 
- if client2 != client { - t.Fatalf("New() after Close() should return the same client wrapper, got different %p, %p", client2, client) - } - if client2.clientImpl == clientImpl { - t.Fatalf("New() after Close() should return different client implementation, got the same %p", client2.clientImpl) - } - if apiClient2 == apiClient { - t.Fatalf("New() after Close() should return different API client, got the same %p", apiClient2) - } -} diff --git a/xds/internal/client/dump.go b/xds/internal/client/dump.go deleted file mode 100644 index 3fd18f6103b3..000000000000 --- a/xds/internal/client/dump.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package client - -import anypb "github.com/golang/protobuf/ptypes/any" - -// UpdateWithMD contains the raw message of the update and the metadata, -// including version, raw message, timestamp. -// -// This is to be used for config dump and CSDS, not directly by users (like -// resolvers/balancers). 
-type UpdateWithMD struct { - MD UpdateMetadata - Raw *anypb.Any -} - -func rawFromCache(s string, cache interface{}) *anypb.Any { - switch c := cache.(type) { - case map[string]ListenerUpdate: - v, ok := c[s] - if !ok { - return nil - } - return v.Raw - case map[string]RouteConfigUpdate: - v, ok := c[s] - if !ok { - return nil - } - return v.Raw - case map[string]ClusterUpdate: - v, ok := c[s] - if !ok { - return nil - } - return v.Raw - case map[string]EndpointsUpdate: - v, ok := c[s] - if !ok { - return nil - } - return v.Raw - default: - return nil - } -} - -func (c *clientImpl) dump(t ResourceType) (string, map[string]UpdateWithMD) { - c.mu.Lock() - defer c.mu.Unlock() - - var ( - version string - md map[string]UpdateMetadata - cache interface{} - ) - switch t { - case ListenerResource: - version = c.ldsVersion - md = c.ldsMD - cache = c.ldsCache - case RouteConfigResource: - version = c.rdsVersion - md = c.rdsMD - cache = c.rdsCache - case ClusterResource: - version = c.cdsVersion - md = c.cdsMD - cache = c.cdsCache - case EndpointsResource: - version = c.edsVersion - md = c.edsMD - cache = c.edsCache - default: - c.logger.Errorf("dumping resource of unknown type: %v", t) - return "", nil - } - - ret := make(map[string]UpdateWithMD, len(md)) - for s, md := range md { - ret[s] = UpdateWithMD{ - MD: md, - Raw: rawFromCache(s, cache), - } - } - return version, ret -} - -// DumpLDS returns the status and contents of LDS. -func (c *clientImpl) DumpLDS() (string, map[string]UpdateWithMD) { - return c.dump(ListenerResource) -} - -// DumpRDS returns the status and contents of RDS. -func (c *clientImpl) DumpRDS() (string, map[string]UpdateWithMD) { - return c.dump(RouteConfigResource) -} - -// DumpCDS returns the status and contents of CDS. -func (c *clientImpl) DumpCDS() (string, map[string]UpdateWithMD) { - return c.dump(ClusterResource) -} - -// DumpEDS returns the status and contents of EDS. 
-func (c *clientImpl) DumpEDS() (string, map[string]UpdateWithMD) { - return c.dump(EndpointsResource) -} diff --git a/xds/internal/client/eds_test.go b/xds/internal/client/eds_test.go deleted file mode 100644 index daa5d6525e19..000000000000 --- a/xds/internal/client/eds_test.go +++ /dev/null @@ -1,406 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package client - -import ( - "fmt" - "net" - "strconv" - "testing" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" - "github.com/golang/protobuf/proto" - anypb "github.com/golang/protobuf/ptypes/any" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/version" -) - -func (s) TestEDSParseRespProto(t *testing.T) { - tests := []struct { - name string - m *v3endpointpb.ClusterLoadAssignment - want EndpointsUpdate - wantErr bool - }{ - { - name: "missing-priority", - m: func() *v3endpointpb.ClusterLoadAssignment { - clab0 := newClaBuilder("test", nil) - clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) - clab0.addLocality("locality-2", 1, 2, 
[]string{"addr2:159"}, nil) - return clab0.Build() - }(), - want: EndpointsUpdate{}, - wantErr: true, - }, - { - name: "missing-locality-ID", - m: func() *v3endpointpb.ClusterLoadAssignment { - clab0 := newClaBuilder("test", nil) - clab0.addLocality("", 1, 0, []string{"addr1:314"}, nil) - return clab0.Build() - }(), - want: EndpointsUpdate{}, - wantErr: true, - }, - { - name: "good", - m: func() *v3endpointpb.ClusterLoadAssignment { - clab0 := newClaBuilder("test", nil) - clab0.addLocality("locality-1", 1, 1, []string{"addr1:314"}, &addLocalityOptions{ - Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_UNHEALTHY}, - Weight: []uint32{271}, - }) - clab0.addLocality("locality-2", 1, 0, []string{"addr2:159"}, &addLocalityOptions{ - Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_DRAINING}, - Weight: []uint32{828}, - }) - return clab0.Build() - }(), - want: EndpointsUpdate{ - Drops: nil, - Localities: []Locality{ - { - Endpoints: []Endpoint{{ - Address: "addr1:314", - HealthStatus: EndpointHealthStatusUnhealthy, - Weight: 271, - }}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []Endpoint{{ - Address: "addr2:159", - HealthStatus: EndpointHealthStatusDraining, - Weight: 828, - }}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, - }, - }, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := parseEDSRespProto(tt.m) - if (err != nil) != tt.wantErr { - t.Errorf("parseEDSRespProto() error = %v, wantErr %v", err, tt.wantErr) - return - } - if d := cmp.Diff(got, tt.want); d != "" { - t.Errorf("parseEDSRespProto() got = %v, want %v, diff: %v", got, tt.want, d) - } - }) - } -} - -func (s) TestUnmarshalEndpoints(t *testing.T) { - var v3EndpointsAny = &anypb.Any{ - TypeUrl: version.V3EndpointsURL, - Value: func() []byte { - clab0 := newClaBuilder("test", nil) - clab0.addLocality("locality-1", 1, 1, []string{"addr1:314"}, 
&addLocalityOptions{ - Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_UNHEALTHY}, - Weight: []uint32{271}, - }) - clab0.addLocality("locality-2", 1, 0, []string{"addr2:159"}, &addLocalityOptions{ - Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_DRAINING}, - Weight: []uint32{828}, - }) - e := clab0.Build() - me, _ := proto.Marshal(e) - return me - }(), - } - const testVersion = "test-version-eds" - - tests := []struct { - name string - resources []*anypb.Any - wantUpdate map[string]EndpointsUpdate - wantMD UpdateMetadata - wantErr bool - }{ - { - name: "non-clusterLoadAssignment resource type", - resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "badly marshaled clusterLoadAssignment resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3EndpointsURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "bad endpoints resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3EndpointsURL, - Value: func() []byte { - clab0 := newClaBuilder("test", nil) - clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) - clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) - e := clab0.Build() - me, _ := proto.Marshal(e) - return me - }(), - }, - }, - wantUpdate: map[string]EndpointsUpdate{"test": {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "v3 endpoints", - resources: []*anypb.Any{v3EndpointsAny}, - wantUpdate: map[string]EndpointsUpdate{ - "test": { - 
Drops: nil, - Localities: []Locality{ - { - Endpoints: []Endpoint{{ - Address: "addr1:314", - HealthStatus: EndpointHealthStatusUnhealthy, - Weight: 271, - }}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []Endpoint{{ - Address: "addr2:159", - HealthStatus: EndpointHealthStatusDraining, - Weight: 828, - }}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, - }, - Raw: v3EndpointsAny, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - // To test that unmarshal keeps processing on errors. - name: "good and bad endpoints", - resources: []*anypb.Any{ - v3EndpointsAny, - { - // bad endpoints resource - TypeUrl: version.V3EndpointsURL, - Value: func() []byte { - clab0 := newClaBuilder("bad", nil) - clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) - clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) - e := clab0.Build() - me, _ := proto.Marshal(e) - return me - }(), - }, - }, - wantUpdate: map[string]EndpointsUpdate{ - "test": { - Drops: nil, - Localities: []Locality{ - { - Endpoints: []Endpoint{{ - Address: "addr1:314", - HealthStatus: EndpointHealthStatusUnhealthy, - Weight: 271, - }}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []Endpoint{{ - Address: "addr2:159", - HealthStatus: EndpointHealthStatusDraining, - Weight: 828, - }}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, - }, - Raw: v3EndpointsAny, - }, - "bad": {}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - update, md, err := UnmarshalEndpoints(testVersion, test.resources, nil) - if (err != nil) != test.wantErr { - 
t.Fatalf("UnmarshalEndpoints(), got err: %v, wantErr: %v", err, test.wantErr) - } - if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { - t.Errorf("got unexpected update, diff (-got +want): %v", diff) - } - if diff := cmp.Diff(md, test.wantMD, cmpOptsIgnoreDetails); diff != "" { - t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) - } - }) - } -} - -// claBuilder builds a ClusterLoadAssignment, aka EDS -// response. -type claBuilder struct { - v *v3endpointpb.ClusterLoadAssignment -} - -// newClaBuilder creates a claBuilder. -func newClaBuilder(clusterName string, dropPercents []uint32) *claBuilder { - var drops []*v3endpointpb.ClusterLoadAssignment_Policy_DropOverload - for i, d := range dropPercents { - drops = append(drops, &v3endpointpb.ClusterLoadAssignment_Policy_DropOverload{ - Category: fmt.Sprintf("test-drop-%d", i), - DropPercentage: &v3typepb.FractionalPercent{ - Numerator: d, - Denominator: v3typepb.FractionalPercent_HUNDRED, - }, - }) - } - - return &claBuilder{ - v: &v3endpointpb.ClusterLoadAssignment{ - ClusterName: clusterName, - Policy: &v3endpointpb.ClusterLoadAssignment_Policy{ - DropOverloads: drops, - }, - }, - } -} - -// addLocalityOptions contains options when adding locality to the builder. -type addLocalityOptions struct { - Health []v3corepb.HealthStatus - Weight []uint32 -} - -// addLocality adds a locality to the builder. 
-func (clab *claBuilder) addLocality(subzone string, weight uint32, priority uint32, addrsWithPort []string, opts *addLocalityOptions) { - var lbEndPoints []*v3endpointpb.LbEndpoint - for i, a := range addrsWithPort { - host, portStr, err := net.SplitHostPort(a) - if err != nil { - panic("failed to split " + a) - } - port, err := strconv.Atoi(portStr) - if err != nil { - panic("failed to atoi " + portStr) - } - - lbe := &v3endpointpb.LbEndpoint{ - HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ - Endpoint: &v3endpointpb.Endpoint{ - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Protocol: v3corepb.SocketAddress_TCP, - Address: host, - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: uint32(port)}}}}}}, - } - if opts != nil { - if i < len(opts.Health) { - lbe.HealthStatus = opts.Health[i] - } - if i < len(opts.Weight) { - lbe.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: opts.Weight[i]} - } - } - lbEndPoints = append(lbEndPoints, lbe) - } - - var localityID *v3corepb.Locality - if subzone != "" { - localityID = &v3corepb.Locality{ - Region: "", - Zone: "", - SubZone: subzone, - } - } - - clab.v.Endpoints = append(clab.v.Endpoints, &v3endpointpb.LocalityLbEndpoints{ - Locality: localityID, - LbEndpoints: lbEndPoints, - LoadBalancingWeight: &wrapperspb.UInt32Value{Value: weight}, - Priority: priority, - }) -} - -// Build builds ClusterLoadAssignment. -func (clab *claBuilder) Build() *v3endpointpb.ClusterLoadAssignment { - return clab.v -} diff --git a/xds/internal/client/lds_test.go b/xds/internal/client/lds_test.go deleted file mode 100644 index df8098df8368..000000000000 --- a/xds/internal/client/lds_test.go +++ /dev/null @@ -1,2016 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package client - -import ( - "fmt" - "net" - "strings" - "testing" - "time" - - v1typepb "github.com/cncf/udpa/go/udpa/type/v1" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - spb "github.com/golang/protobuf/ptypes/struct" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/version" - "google.golang.org/protobuf/types/known/durationpb" - - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v2httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" - v2listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v2" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - anypb "github.com/golang/protobuf/ptypes/any" - wrapperspb 
"github.com/golang/protobuf/ptypes/wrappers" -) - -func (s) TestUnmarshalListener_ClientSide(t *testing.T) { - const ( - v2LDSTarget = "lds.target.good:2222" - v3LDSTarget = "lds.target.good:3333" - v2RouteConfigName = "v2RouteConfig" - v3RouteConfigName = "v3RouteConfig" - ) - - var ( - v2Lis = &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v2httppb.HttpConnectionManager{ - RouteSpecifier: &v2httppb.HttpConnectionManager_Rds{ - Rds: &v2httppb.Rds{ - ConfigSource: &v2corepb.ConfigSource{ - ConfigSourceSpecifier: &v2corepb.ConfigSource_Ads{Ads: &v2corepb.AggregatedConfigSource{}}, - }, - RouteConfigName: v2RouteConfigName, - }, - }, - } - mcm, _ := proto.Marshal(cm) - lis := &v2xdspb.Listener{ - Name: v2LDSTarget, - ApiListener: &v2listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2HTTPConnManagerURL, - Value: mcm, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } - customFilter = &v3httppb.HttpFilter{ - Name: "customFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, - } - typedStructFilter = &v3httppb.HttpFilter{ - Name: "customFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: wrappedCustomFilterTypedStructConfig}, - } - customOptionalFilter = &v3httppb.HttpFilter{ - Name: "customFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, - IsOptional: true, - } - customFilter2 = &v3httppb.HttpFilter{ - Name: "customFilter2", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, - } - errFilter = &v3httppb.HttpFilter{ - Name: "errFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: errFilterConfig}, - } - errOptionalFilter = &v3httppb.HttpFilter{ - Name: "errFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: errFilterConfig}, - IsOptional: true, - } - clientOnlyCustomFilter = &v3httppb.HttpFilter{ - Name: "clientOnlyCustomFilter", 
- ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: clientOnlyCustomFilterConfig}, - } - serverOnlyCustomFilter = &v3httppb.HttpFilter{ - Name: "serverOnlyCustomFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig}, - } - serverOnlyOptionalCustomFilter = &v3httppb.HttpFilter{ - Name: "serverOnlyOptionalCustomFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig}, - IsOptional: true, - } - unknownFilter = &v3httppb.HttpFilter{ - Name: "unknownFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: unknownFilterConfig}, - } - unknownOptionalFilter = &v3httppb.HttpFilter{ - Name: "unknownFilter", - ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: unknownFilterConfig}, - IsOptional: true, - } - v3LisWithFilters = func(fs ...*v3httppb.HttpFilter) *anypb.Any { - hcm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{ - ConfigSource: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, - }, - RouteConfigName: v3RouteConfigName, - }, - }, - CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ - MaxStreamDuration: durationpb.New(time.Second), - }, - HttpFilters: fs, - } - return &anypb.Any{ - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - mcm := marshalAny(hcm) - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: mcm, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } - } - ) - const testVersion = "test-version-lds-client" - - tests := []struct { - name string - resources []*anypb.Any - wantUpdate map[string]ListenerUpdate - wantMD UpdateMetadata - wantErr bool - disableFI bool // disable fault injection - }{ - { - name: "non-listener resource", - resources: []*anypb.Any{{TypeUrl: version.V3HTTPConnManagerURL}}, - wantMD: UpdateMetadata{ - 
Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "badly marshaled listener resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V3HTTPConnManagerURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "wrong type in apiListener", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{ - ConfigSource: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, - }, - RouteConfigName: v3RouteConfigName, - }, - }, - } - mcm, _ := proto.Marshal(cm) - return mcm - }(), - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "empty httpConnMgr in apiListener", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ 
- Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{}, - }, - } - mcm, _ := proto.Marshal(cm) - return mcm - }(), - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "scopedRoutes routeConfig in apiListener", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, - } - mcm, _ := proto.Marshal(cm) - return mcm - }(), - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "rds.ConfigSource in apiListener is not ADS", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{ - ConfigSource: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: 
&v3corepb.ConfigSource_Path{ - Path: "/some/path", - }, - }, - RouteConfigName: v3RouteConfigName, - }, - }, - } - mcm, _ := proto.Marshal(cm) - return mcm - }(), - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "empty resource list", - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 with no filters", - resources: []*anypb.Any{v3LisWithFilters()}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters()}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 with custom filter", - resources: []*anypb.Any{v3LisWithFilters(customFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{{ - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterConfig}, - }}, - Raw: v3LisWithFilters(customFilter), - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 with custom filter in typed struct", - resources: []*anypb.Any{v3LisWithFilters(typedStructFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{{ - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterTypedStructConfig}, - }}, - Raw: v3LisWithFilters(typedStructFilter), - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 with 
optional custom filter", - resources: []*anypb.Any{v3LisWithFilters(customOptionalFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{{ - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterConfig}, - }}, - Raw: v3LisWithFilters(customOptionalFilter), - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 with custom filter, fault injection disabled", - resources: []*anypb.Any{v3LisWithFilters(customFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters(customFilter)}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - disableFI: true, - }, - { - name: "v3 with two filters with same name", - resources: []*anypb.Any{v3LisWithFilters(customFilter, customFilter)}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "v3 with two filters - same type different name", - resources: []*anypb.Any{v3LisWithFilters(customFilter, customFilter2)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{{ - Name: "customFilter", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterConfig}, - }, { - Name: "customFilter2", - Filter: httpFilter{}, - Config: filterConfig{Cfg: customFilterConfig}, - }}, - Raw: v3LisWithFilters(customFilter, customFilter2), - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 with server-only filter", - resources: 
[]*anypb.Any{v3LisWithFilters(serverOnlyCustomFilter)}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "v3 with optional server-only filter", - resources: []*anypb.Any{v3LisWithFilters(serverOnlyOptionalCustomFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - RouteConfigName: v3RouteConfigName, - MaxStreamDuration: time.Second, - Raw: v3LisWithFilters(serverOnlyOptionalCustomFilter), - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 with client-only filter", - resources: []*anypb.Any{v3LisWithFilters(clientOnlyCustomFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, - HTTPFilters: []HTTPFilter{{ - Name: "clientOnlyCustomFilter", - Filter: clientOnlyHTTPFilter{}, - Config: filterConfig{Cfg: clientOnlyCustomFilterConfig}, - }}, - Raw: v3LisWithFilters(clientOnlyCustomFilter), - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 with err filter", - resources: []*anypb.Any{v3LisWithFilters(errFilter)}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "v3 with optional err filter", - resources: []*anypb.Any{v3LisWithFilters(errOptionalFilter)}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "v3 with 
unknown filter", - resources: []*anypb.Any{v3LisWithFilters(unknownFilter)}, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "v3 with unknown filter (optional)", - resources: []*anypb.Any{v3LisWithFilters(unknownOptionalFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - RouteConfigName: v3RouteConfigName, - MaxStreamDuration: time.Second, - Raw: v3LisWithFilters(unknownOptionalFilter), - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 with error filter, fault injection disabled", - resources: []*anypb.Any{v3LisWithFilters(errFilter)}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - RouteConfigName: v3RouteConfigName, - MaxStreamDuration: time.Second, - Raw: v3LisWithFilters(errFilter), - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - disableFI: true, - }, - { - name: "v2 listener resource", - resources: []*anypb.Any{v2Lis}, - wantUpdate: map[string]ListenerUpdate{ - v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 listener resource", - resources: []*anypb.Any{v3LisWithFilters()}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters()}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "multiple listener resources", - resources: []*anypb.Any{v2Lis, v3LisWithFilters()}, - wantUpdate: map[string]ListenerUpdate{ - v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis}, - v3LDSTarget: {RouteConfigName: v3RouteConfigName, 
MaxStreamDuration: time.Second, Raw: v3LisWithFilters()}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - // To test that unmarshal keeps processing on errors. - name: "good and bad listener resources", - resources: []*anypb.Any{ - v2Lis, - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: "bad", - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: version.V2ListenerURL, - Value: func() []byte { - cm := &v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, - } - mcm, _ := proto.Marshal(cm) - return mcm - }()}}} - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - v3LisWithFilters(), - }, - wantUpdate: map[string]ListenerUpdate{ - v2LDSTarget: {RouteConfigName: v2RouteConfigName, Raw: v2Lis}, - v3LDSTarget: {RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, Raw: v3LisWithFilters()}, - "bad": {}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - oldFI := env.FaultInjectionSupport - env.FaultInjectionSupport = !test.disableFI - - update, md, err := UnmarshalListener(testVersion, test.resources, nil) - if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalListener(), got err: %v, wantErr: %v", err, test.wantErr) - } - if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { - t.Errorf("got unexpected update, diff (-got +want): %v", diff) - } - if diff := cmp.Diff(md, test.wantMD, cmpOptsIgnoreDetails); diff != "" { - t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) - } - env.FaultInjectionSupport = oldFI - }) - } -} - -func (s) TestUnmarshalListener_ServerSide(t *testing.T) { - const v3LDSTarget = 
"grpc/server?xds.resource.listening_address=0.0.0.0:9999" - - var ( - listenerEmptyTransportSocket = &anypb.Any{ - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } - listenerNoValidationContext = &anypb.Any{ - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "identityPluginInstance", - CertificateName: "identityCertName", - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - }, - DefaultFilterChain: &v3listenerpb.FilterChain{ - Name: "default-filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := 
&v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "defaultIdentityPluginInstance", - CertificateName: "defaultIdentityCertName", - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } - listenerWithValidationContext = &anypb.Any{ - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "identityPluginInstance", - CertificateName: "identityCertName", - }, - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "rootPluginInstance", - CertificateName: "rootCertName", - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - }, - DefaultFilterChain: &v3listenerpb.FilterChain{ - Name: "default-filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - 
ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "defaultIdentityPluginInstance", - CertificateName: "defaultIdentityCertName", - }, - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "defaultRootPluginInstance", - CertificateName: "defaultRootCertName", - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - } - ) - - const testVersion = "test-version-lds-server" - - tests := []struct { - name string - resources []*anypb.Any - wantUpdate map[string]ListenerUpdate - wantMD UpdateMetadata - wantErr string - }{ - { - name: "non-empty listener filters", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - ListenerFilters: []*v3listenerpb.ListenerFilter{ - {Name: "listener-filter-1"}, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "unsupported field 'listener_filters'", - }, - { - name: "use_original_dst is set", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - UseOriginalDst: &wrapperspb.BoolValue{Value: 
true}, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "unsupported field 'use_original_dst'", - }, - { - name: "no address field", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "no address field in LDS response", - }, - { - name: "no socket address field", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{}, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "no socket_address field in LDS response", - }, - { - name: "unexpected transport socket name", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - 
Name: "unsupported-transport-socket-name", - }, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "transport_socket field has unexpected name", - }, - { - name: "unexpected transport socket typedConfig URL", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3UpstreamTLSContextURL, - }, - }, - }, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "transport_socket field has unexpected typeURL", - }, - { - name: "badly marshaled transport socket", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: 
"filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - }, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "failed to unmarshal DownstreamTlsContext in LDS response", - }, - { - name: "missing CommonTlsContext", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{} - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", - }, - { - name: "unsupported validation context in transport socket", - resources: []*anypb.Any{ - { - TypeUrl: 
version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ - ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ - Name: "foo-sds-secret", - }, - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "validation context contains unexpected type", - }, - { - name: "empty transport socket", - resources: []*anypb.Any{listenerEmptyTransportSocket}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - InboundListenerCfg: &InboundListenerConfig{ - Address: "0.0.0.0", - Port: "9999", - FilterChains: []*FilterChain{{Match: &FilterChainMatch{}}}, - }, - Raw: listenerEmptyTransportSocket, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "no identity and root certificate providers", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: 
v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "identityPluginInstance", - CertificateName: "identityCertName", - }, - }, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", - }, - { - name: "no identity certificate provider with require_client_cert", - resources: []*anypb.Any{ - { - TypeUrl: version.V3ListenerURL, - Value: func() []byte { - lis := &v3listenerpb.Listener{ - Name: v3LDSTarget, - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: "0.0.0.0", - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: 9999, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - 
Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{}, - } - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }, - }, - }, - }, - }, - } - mLis, _ := proto.Marshal(lis) - return mLis - }(), - }, - }, - wantUpdate: map[string]ListenerUpdate{v3LDSTarget: {}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: "security configuration on the server-side does not contain identity certificate provider instance name", - }, - { - name: "happy case with no validation context", - resources: []*anypb.Any{listenerNoValidationContext}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - InboundListenerCfg: &InboundListenerConfig{ - Address: "0.0.0.0", - Port: "9999", - FilterChains: []*FilterChain{ - { - Match: &FilterChainMatch{}, - SecurityCfg: &SecurityConfig{ - IdentityInstanceName: "identityPluginInstance", - IdentityCertName: "identityCertName", - }, - }, - }, - DefaultFilterChain: &FilterChain{ - Match: &FilterChainMatch{}, - SecurityCfg: &SecurityConfig{ - IdentityInstanceName: "defaultIdentityPluginInstance", - IdentityCertName: "defaultIdentityCertName", - }, - }, - }, - Raw: listenerNoValidationContext, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "happy case with validation context provider instance", - resources: []*anypb.Any{listenerWithValidationContext}, - wantUpdate: map[string]ListenerUpdate{ - v3LDSTarget: { - InboundListenerCfg: &InboundListenerConfig{ - Address: "0.0.0.0", - Port: "9999", - FilterChains: []*FilterChain{ - { - Match: &FilterChainMatch{}, - SecurityCfg: &SecurityConfig{ - RootInstanceName: "rootPluginInstance", - 
RootCertName: "rootCertName", - IdentityInstanceName: "identityPluginInstance", - IdentityCertName: "identityCertName", - RequireClientCert: true, - }, - }, - }, - DefaultFilterChain: &FilterChain{ - Match: &FilterChainMatch{}, - SecurityCfg: &SecurityConfig{ - RootInstanceName: "defaultRootPluginInstance", - RootCertName: "defaultRootCertName", - IdentityInstanceName: "defaultIdentityPluginInstance", - IdentityCertName: "defaultIdentityCertName", - RequireClientCert: true, - }, - }, - }, - Raw: listenerWithValidationContext, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - gotUpdate, md, err := UnmarshalListener(testVersion, test.resources, nil) - if (err != nil) != (test.wantErr != "") { - t.Fatalf("UnmarshalListener(), got err: %v, wantErr: %v", err, test.wantErr) - } - if err != nil && !strings.Contains(err.Error(), test.wantErr) { - t.Fatalf("UnmarshalListener() = %v wantErr: %q", err, test.wantErr) - } - if diff := cmp.Diff(gotUpdate, test.wantUpdate, cmpOpts); diff != "" { - t.Errorf("got unexpected update, diff (-got +want): %v", diff) - } - if diff := cmp.Diff(md, test.wantMD, cmpOptsIgnoreDetails); diff != "" { - t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) - } - }) - } -} - -func (s) TestGetFilterChain(t *testing.T) { - tests := []struct { - desc string - inputFilterChain *v3listenerpb.FilterChain - wantFilterChain *FilterChain - wantErr bool - }{ - { - desc: "empty", - inputFilterChain: nil, - }, - { - desc: "unsupported destination port", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - DestinationPort: &wrapperspb.UInt32Value{ - Value: 666, - }, - }, - }, - }, - { - desc: "unsupported server names", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - ServerNames: []string{"example-server"}, - }, - }, - }, 
- { - desc: "unsupported transport protocol", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - TransportProtocol: "tls", - }, - }, - }, - { - desc: "unsupported application protocol", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - ApplicationProtocols: []string{"h2"}, - }, - }, - }, - { - desc: "bad dest address prefix", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "a.b.c.d", - }, - }, - }, - }, - wantErr: true, - }, - { - desc: "bad dest prefix length", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 50, - }, - }, - }, - }, - }, - wantErr: true, - }, - { - desc: "dest prefix ranges", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 8, - }, - }, - { - AddressPrefix: "192.168.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 24, - }, - }, - }, - }, - }, - wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - DestPrefixRanges: []net.IP{ - net.IPv4(10, 1, 1, 0), - net.IPv4(192, 168, 1, 0), - }, - }, - }, - }, - { - desc: "source type local", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, - }, - }, - wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - SourceType: SourceTypeSameOrLoopback, - }, - }, - }, - { - desc: "source type external", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourceType: 
v3listenerpb.FilterChainMatch_EXTERNAL, - }, - }, - wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - SourceType: SourceTypeExternal, - }, - }, - }, - { - desc: "source type any", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourceType: v3listenerpb.FilterChainMatch_ANY, - }, - }, - wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - SourceType: SourceTypeAny, - }, - }, - }, - { - desc: "bad source address prefix", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourcePrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "a.b.c.d", - }, - }, - }, - }, - wantErr: true, - }, - { - desc: "bad source prefix length", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourcePrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 50, - }, - }, - }, - }, - }, - wantErr: true, - }, - { - desc: "source prefix ranges", - inputFilterChain: &v3listenerpb.FilterChain{ - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - SourcePrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 8, - }, - }, - { - AddressPrefix: "192.168.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 24, - }, - }, - }, - }, - }, - wantFilterChain: &FilterChain{ - Match: &FilterChainMatch{ - SourcePrefixRanges: []net.IP{ - net.IPv4(10, 1, 1, 0), - net.IPv4(192, 168, 1, 0), - }, - }, - }, - }, - { - desc: "empty transport socket", - inputFilterChain: &v3listenerpb.FilterChain{}, - wantFilterChain: &FilterChain{Match: &FilterChainMatch{}}, - }, - { - desc: "bad transport socket name", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "unsupported-transport-socket-name", - }, - }, - wantErr: true, - }, - { - desc: "unexpected url in transport socket", - 
inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.UpstreamTlsContext{}), - }, - }, - }, - wantErr: true, - }, - { - desc: "badly marshaled downstream tls context", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - }, - }, - wantErr: true, - }, - { - desc: "missing common tls context", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{}), - }, - }, - }, - wantErr: true, - }, - { - desc: "unsupported validation context", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{ - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ - ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ - Name: "foo-sds-secret", - }, - }, - }, - }), - }, - }, - }, - wantErr: true, - }, - { - desc: "no identity and root certificate providers", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - 
TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "identityPluginInstance", - CertificateName: "identityCertName", - }, - }, - }), - }, - }, - }, - wantErr: true, - }, - { - desc: "no identity certificate provider with require_client_cert", - inputFilterChain: &v3listenerpb.FilterChain{ - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ - CommonTlsContext: &v3tlspb.CommonTlsContext{}, - }), - }, - }, - }, - wantErr: true, - }, - { - desc: "happy case", - inputFilterChain: &v3listenerpb.FilterChain{ - Name: "filter-chain-1", - FilterChainMatch: &v3listenerpb.FilterChainMatch{ - PrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 8, - }, - }, - }, - SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, - SourcePrefixRanges: []*v3corepb.CidrRange{ - { - AddressPrefix: "10.1.1.0", - PrefixLen: &wrapperspb.UInt32Value{ - Value: 8, - }, - }, - }, - SourcePorts: []uint32{80, 8080}, - }, - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: marshalAny(&v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "identityPluginInstance", - CertificateName: "identityCertName", - }, - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "rootPluginInstance", - CertificateName: "rootCertName", - }, - }, - }, - }), - }, - }, - }, - wantFilterChain: &FilterChain{ - 
Match: &FilterChainMatch{ - DestPrefixRanges: []net.IP{net.IPv4(10, 1, 1, 0)}, - SourceType: SourceTypeExternal, - SourcePrefixRanges: []net.IP{net.IPv4(10, 1, 1, 0)}, - SourcePorts: []uint32{80, 8080}, - }, - SecurityCfg: &SecurityConfig{ - RootInstanceName: "rootPluginInstance", - RootCertName: "rootCertName", - IdentityInstanceName: "identityPluginInstance", - IdentityCertName: "identityCertName", - RequireClientCert: true, - }, - }, - }, - } - - for _, test := range tests { - t.Run(test.desc, func(t *testing.T) { - gotFilterChain, gotErr := getFilterChain(test.inputFilterChain) - if (gotErr != nil) != test.wantErr { - t.Fatalf("getFilterChain(%+v) returned error: %v, wantErr: %v", test.inputFilterChain, gotErr, test.wantErr) - } - if diff := cmp.Diff(test.wantFilterChain, gotFilterChain); diff != "" { - t.Errorf("getFilterChain(%+v) returned unexpected, diff (-want +got):\n%s", test.inputFilterChain, diff) - } - }) - } -} - -type filterConfig struct { - httpfilter.FilterConfig - Cfg proto.Message - Override proto.Message -} - -// httpFilter allows testing the http filter registry and parsing functionality. -type httpFilter struct { - httpfilter.ClientInterceptorBuilder - httpfilter.ServerInterceptorBuilder -} - -func (httpFilter) TypeURLs() []string { return []string{"custom.filter"} } - -func (httpFilter) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { - return filterConfig{Cfg: cfg}, nil -} - -func (httpFilter) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { - return filterConfig{Override: override}, nil -} - -// errHTTPFilter returns errors no matter what is passed to ParseFilterConfig. 
-type errHTTPFilter struct { - httpfilter.ClientInterceptorBuilder -} - -func (errHTTPFilter) TypeURLs() []string { return []string{"err.custom.filter"} } - -func (errHTTPFilter) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { - return nil, fmt.Errorf("error from ParseFilterConfig") -} - -func (errHTTPFilter) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { - return nil, fmt.Errorf("error from ParseFilterConfigOverride") -} - -func init() { - httpfilter.Register(httpFilter{}) - httpfilter.Register(errHTTPFilter{}) - httpfilter.Register(serverOnlyHTTPFilter{}) - httpfilter.Register(clientOnlyHTTPFilter{}) -} - -// serverOnlyHTTPFilter does not implement ClientInterceptorBuilder -type serverOnlyHTTPFilter struct { - httpfilter.ServerInterceptorBuilder -} - -func (serverOnlyHTTPFilter) TypeURLs() []string { return []string{"serverOnly.custom.filter"} } - -func (serverOnlyHTTPFilter) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { - return filterConfig{Cfg: cfg}, nil -} - -func (serverOnlyHTTPFilter) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { - return filterConfig{Override: override}, nil -} - -// clientOnlyHTTPFilter does not implement ServerInterceptorBuilder -type clientOnlyHTTPFilter struct { - httpfilter.ClientInterceptorBuilder -} - -func (clientOnlyHTTPFilter) TypeURLs() []string { return []string{"clientOnly.custom.filter"} } - -func (clientOnlyHTTPFilter) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { - return filterConfig{Cfg: cfg}, nil -} - -func (clientOnlyHTTPFilter) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { - return filterConfig{Override: override}, nil -} - -var customFilterConfig = &anypb.Any{ - TypeUrl: "custom.filter", - Value: []byte{1, 2, 3}, -} - -var errFilterConfig = &anypb.Any{ - TypeUrl: "err.custom.filter", - Value: []byte{1, 2, 3}, -} - -var 
serverOnlyCustomFilterConfig = &anypb.Any{ - TypeUrl: "serverOnly.custom.filter", - Value: []byte{1, 2, 3}, -} - -var clientOnlyCustomFilterConfig = &anypb.Any{ - TypeUrl: "clientOnly.custom.filter", - Value: []byte{1, 2, 3}, -} - -var customFilterTypedStructConfig = &v1typepb.TypedStruct{ - TypeUrl: "custom.filter", - Value: &spb.Struct{ - Fields: map[string]*spb.Value{ - "foo": {Kind: &spb.Value_StringValue{StringValue: "bar"}}, - }, - }, -} -var wrappedCustomFilterTypedStructConfig *anypb.Any - -func init() { - wrappedCustomFilterTypedStructConfig = marshalAny(customFilterTypedStructConfig) -} - -var unknownFilterConfig = &anypb.Any{ - TypeUrl: "unknown.custom.filter", - Value: []byte{1, 2, 3}, -} - -func wrappedOptionalFilter(name string) *anypb.Any { - return marshalAny(&v3routepb.FilterConfig{ - IsOptional: true, - Config: &anypb.Any{ - TypeUrl: name, - Value: []byte{1, 2, 3}, - }, - }) -} - -func marshalAny(m proto.Message) *anypb.Any { - a, err := ptypes.MarshalAny(m) - if err != nil { - panic(fmt.Sprintf("ptypes.MarshalAny(%+v) failed: %v", m, err)) - } - return a -} diff --git a/xds/internal/client/loadreport.go b/xds/internal/client/loadreport.go deleted file mode 100644 index be42a6e0c383..000000000000 --- a/xds/internal/client/loadreport.go +++ /dev/null @@ -1,139 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package client - -import ( - "context" - - "google.golang.org/grpc" - "google.golang.org/grpc/xds/internal/client/load" -) - -// ReportLoad starts an load reporting stream to the given server. If the server -// is not an empty string, and is different from the management server, a new -// ClientConn will be created. -// -// The same options used for creating the Client will be used (including -// NodeProto, and dial options if necessary). -// -// It returns a Store for the user to report loads, a function to cancel the -// load reporting stream. -func (c *clientImpl) ReportLoad(server string) (*load.Store, func()) { - c.lrsMu.Lock() - defer c.lrsMu.Unlock() - - // If there's already a client to this server, use it. Otherwise, create - // one. - lrsC, ok := c.lrsClients[server] - if !ok { - lrsC = newLRSClient(c, server) - c.lrsClients[server] = lrsC - } - - store := lrsC.ref() - return store, func() { - // This is a callback, need to hold lrsMu. - c.lrsMu.Lock() - defer c.lrsMu.Unlock() - if lrsC.unRef() { - // Delete the lrsClient from map if this is the last reference. - delete(c.lrsClients, server) - } - } -} - -// lrsClient maps to one lrsServer. It contains: -// - a ClientConn to this server (only if it's different from the management -// server) -// - a load.Store that contains loads only for this server -type lrsClient struct { - parent *clientImpl - server string - - cc *grpc.ClientConn // nil if the server is same as the management server - refCount int - cancelStream func() - loadStore *load.Store -} - -// newLRSClient creates a new LRS stream to the server. -func newLRSClient(parent *clientImpl, server string) *lrsClient { - return &lrsClient{ - parent: parent, - server: server, - refCount: 0, - } -} - -// ref increments the refCount. If this is the first ref, it starts the LRS stream. -// -// Not thread-safe, caller needs to synchronize. 
-func (lrsC *lrsClient) ref() *load.Store { - lrsC.refCount++ - if lrsC.refCount == 1 { - lrsC.startStream() - } - return lrsC.loadStore -} - -// unRef decrements the refCount, and closes the stream if refCount reaches 0 -// (and close the cc if cc is not xDS cc). It returns whether refCount reached 0 -// after this call. -// -// Not thread-safe, caller needs to synchronize. -func (lrsC *lrsClient) unRef() (closed bool) { - lrsC.refCount-- - if lrsC.refCount != 0 { - return false - } - lrsC.parent.logger.Infof("Stopping load report to server: %s", lrsC.server) - lrsC.cancelStream() - if lrsC.cc != nil { - lrsC.cc.Close() - } - return true -} - -// startStream starts the LRS stream to the server. If server is not the same -// management server from the parent, it also creates a ClientConn. -func (lrsC *lrsClient) startStream() { - var cc *grpc.ClientConn - - lrsC.parent.logger.Infof("Starting load report to server: %s", lrsC.server) - if lrsC.server == "" || lrsC.server == lrsC.parent.config.BalancerName { - // Reuse the xDS client if server is the same. - cc = lrsC.parent.cc - } else { - lrsC.parent.logger.Infof("LRS server is different from management server, starting a new ClientConn") - ccNew, err := grpc.Dial(lrsC.server, lrsC.parent.config.Creds) - if err != nil { - // An error from a non-blocking dial indicates something serious. - lrsC.parent.logger.Infof("xds: failed to dial load report server {%s}: %v", lrsC.server, err) - return - } - cc = ccNew - lrsC.cc = ccNew - } - - var ctx context.Context - ctx, lrsC.cancelStream = context.WithCancel(context.Background()) - - // Create the store and stream. 
- lrsC.loadStore = load.NewStore() - go lrsC.parent.apiClient.reportLoad(ctx, cc, loadReportingOptions{loadStore: lrsC.loadStore}) -} diff --git a/xds/internal/client/rds_test.go b/xds/internal/client/rds_test.go deleted file mode 100644 index 2ca01dca9ca2..000000000000 --- a/xds/internal/client/rds_test.go +++ /dev/null @@ -1,1104 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package client - -import ( - "fmt" - "testing" - "time" - - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - v2routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" - "github.com/golang/protobuf/proto" - anypb "github.com/golang/protobuf/ptypes/any" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - - "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/version" - "google.golang.org/protobuf/types/known/durationpb" -) - -func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t 
*testing.T) { - const ( - uninterestingDomain = "uninteresting.domain" - uninterestingClusterName = "uninterestingClusterName" - ldsTarget = "lds.target.good:1111" - routeName = "routeName" - clusterName = "clusterName" - ) - - var ( - goodRouteConfigWithFilterConfigs = func(cfgs map[string]*anypb.Any) *v3routepb.RouteConfiguration { - return &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}}, - }, - }}, - TypedPerFilterConfig: cfgs, - }}, - } - } - goodUpdateWithFilterConfigs = func(cfgs map[string]httpfilter.FilterConfig) RouteConfigUpdate { - return RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{{ - Domains: []string{ldsTarget}, - Routes: []*Route{{ - Prefix: newStringP("/"), - WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, - }}, - HTTPFilterConfigOverride: cfgs, - }}, - } - } - ) - - tests := []struct { - name string - rc *v3routepb.RouteConfiguration - wantUpdate RouteConfigUpdate - wantError bool - disableFI bool // disable fault injection - }{ - { - name: "default-route-match-field-is-nil", - rc: &v3routepb.RouteConfiguration{ - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, - }, - }, - }, - }, - }, - }, - }, - wantError: true, - }, - { - name: "default-route-match-field-is-non-nil", - rc: &v3routepb.RouteConfiguration{ - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{}, - Action: &v3routepb.Route_Route{}, - }, 
- }, - }, - }, - }, - wantError: true, - }, - { - name: "default-route-routeaction-field-is-nil", - rc: &v3routepb.RouteConfiguration{ - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{{}}, - }, - }, - }, - wantError: true, - }, - { - name: "default-route-cluster-field-is-empty", - rc: &v3routepb.RouteConfiguration{ - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_ClusterHeader{}, - }, - }, - }, - }, - }, - }, - }, - wantError: true, - }, - { - // default route's match sets case-sensitive to false. - name: "good-route-config-but-with-casesensitive-false", - rc: &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, - CaseSensitive: &wrapperspb.BoolValue{Value: false}, - }, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, - }}}}}}}, - wantUpdate: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP("/"), CaseInsensitive: true, WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}}}, - }, - }, - }, - }, - { - name: "good-route-config-with-empty-string-route", - rc: &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, - 
}, - }, - }, - }, - }, - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, - }, - }, - }, - }, - }, - }, - }, - wantUpdate: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}}}, - }, - }, - }, - }, - { - // default route's match is not empty string, but "/". - name: "good-route-config-with-slash-string-route", - rc: &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, - }, - }, - }, - }, - }, - }, - }, - wantUpdate: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}}}, - }, - }, - }, - }, - { - // weights not add up to total-weight. 
- name: "route-config-with-weighted_clusters_weights_not_add_up", - rc: &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ - WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - {Name: "a", Weight: &wrapperspb.UInt32Value{Value: 2}}, - {Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}}, - {Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}}, - }, - TotalWeight: &wrapperspb.UInt32Value{Value: 30}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - wantError: true, - }, - { - name: "good-route-config-with-weighted_clusters", - rc: &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ - WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - {Name: "a", Weight: &wrapperspb.UInt32Value{Value: 2}}, - {Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}}, - {Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}}, - }, - TotalWeight: &wrapperspb.UInt32Value{Value: 10}, - }, - }, - }, - }, - }, - }, - }, - }, - }, - wantUpdate: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*Route{{ - Prefix: newStringP("/"), - WeightedClusters: map[string]WeightedCluster{ - "a": {Weight: 2}, - "b": {Weight: 3}, - "c": {Weight: 5}, - }, - }}, - }, - }, - }, - }, - { - name: 
"good-route-config-with-max-stream-duration", - rc: &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, - MaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{MaxStreamDuration: durationpb.New(time.Second)}, - }, - }, - }, - }, - }, - }, - }, - wantUpdate: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*Route{{ - Prefix: newStringP("/"), - WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, - MaxStreamDuration: newDurationP(time.Second), - }}, - }, - }, - }, - }, - { - name: "good-route-config-with-grpc-timeout-header-max", - rc: &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, - MaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{GrpcTimeoutHeaderMax: durationpb.New(time.Second)}, - }, - }, - }, - }, - }, - }, - }, - wantUpdate: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*Route{{ - Prefix: newStringP("/"), - WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, - MaxStreamDuration: newDurationP(time.Second), - }}, - }, - }, - }, - }, - { - name: "good-route-config-with-both-timeouts", - rc: &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: 
[]*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, - MaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{MaxStreamDuration: durationpb.New(2 * time.Second), GrpcTimeoutHeaderMax: durationpb.New(0)}, - }, - }, - }, - }, - }, - }, - }, - wantUpdate: RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{ldsTarget}, - Routes: []*Route{{ - Prefix: newStringP("/"), - WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, - MaxStreamDuration: newDurationP(0), - }}, - }, - }, - }, - }, - { - name: "good-route-config-with-http-filter-config", - rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": customFilterConfig}), - wantUpdate: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterConfig}}), - }, - { - name: "good-route-config-with-http-filter-config-typed-struct", - rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedCustomFilterTypedStructConfig}), - wantUpdate: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterTypedStructConfig}}), - }, - { - name: "good-route-config-with-optional-http-filter-config", - rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("custom.filter")}), - wantUpdate: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterConfig}}), - }, - { - name: "good-route-config-with-http-err-filter-config", - rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": errFilterConfig}), - wantError: true, - }, - { - name: "good-route-config-with-http-optional-err-filter-config", - rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": 
wrappedOptionalFilter("err.custom.filter")}), - wantError: true, - }, - { - name: "good-route-config-with-http-unknown-filter-config", - rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": unknownFilterConfig}), - wantError: true, - }, - { - name: "good-route-config-with-http-optional-unknown-filter-config", - rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("unknown.custom.filter")}), - wantUpdate: goodUpdateWithFilterConfigs(nil), - }, - { - name: "good-route-config-with-http-err-filter-config-fi-disabled", - disableFI: true, - rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": errFilterConfig}), - wantUpdate: goodUpdateWithFilterConfigs(nil), - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - oldFI := env.FaultInjectionSupport - env.FaultInjectionSupport = !test.disableFI - - gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc, nil, false) - if (gotError != nil) != test.wantError || - !cmp.Equal(gotUpdate, test.wantUpdate, cmpopts.EquateEmpty(), - cmp.Transformer("FilterConfig", func(fc httpfilter.FilterConfig) string { - return fmt.Sprint(fc) - })) { - t.Errorf("generateRDSUpdateFromRouteConfiguration(%+v, %v) returned unexpected, diff (-want +got):\\n%s", test.rc, ldsTarget, cmp.Diff(test.wantUpdate, gotUpdate, cmpopts.EquateEmpty())) - - env.FaultInjectionSupport = oldFI - } - }) - } -} - -func (s) TestUnmarshalRouteConfig(t *testing.T) { - const ( - ldsTarget = "lds.target.good:1111" - uninterestingDomain = "uninteresting.domain" - uninterestingClusterName = "uninterestingClusterName" - v2RouteConfigName = "v2RouteConfig" - v3RouteConfigName = "v3RouteConfig" - v2ClusterName = "v2Cluster" - v3ClusterName = "v3Cluster" - ) - - var ( - v2VirtualHost = []*v2routepb.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*v2routepb.Route{ - { - Match: &v2routepb.RouteMatch{PathSpecifier: &v2routepb.RouteMatch_Prefix{Prefix: ""}}, 
- Action: &v2routepb.Route_Route{ - Route: &v2routepb.RouteAction{ - ClusterSpecifier: &v2routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, - }, - }, - }, - }, - }, - { - Domains: []string{ldsTarget}, - Routes: []*v2routepb.Route{ - { - Match: &v2routepb.RouteMatch{PathSpecifier: &v2routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &v2routepb.Route_Route{ - Route: &v2routepb.RouteAction{ - ClusterSpecifier: &v2routepb.RouteAction_Cluster{Cluster: v2ClusterName}, - }, - }, - }, - }, - }, - } - v2RouteConfig = &anypb.Any{ - TypeUrl: version.V2RouteConfigURL, - Value: func() []byte { - rc := &v2xdspb.RouteConfiguration{ - Name: v2RouteConfigName, - VirtualHosts: v2VirtualHost, - } - m, _ := proto.Marshal(rc) - return m - }(), - } - v3VirtualHost = []*v3routepb.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, - }, - }, - }, - }, - }, - { - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: v3ClusterName}, - }, - }, - }, - }, - }, - } - v3RouteConfig = &anypb.Any{ - TypeUrl: version.V2RouteConfigURL, - Value: func() []byte { - rc := &v3routepb.RouteConfiguration{ - Name: v3RouteConfigName, - VirtualHosts: v3VirtualHost, - } - m, _ := proto.Marshal(rc) - return m - }(), - } - ) - const testVersion = "test-version-rds" - - tests := []struct { - name string - resources []*anypb.Any - wantUpdate map[string]RouteConfigUpdate - wantMD UpdateMetadata - wantErr bool - }{ - { - name: "non-routeConfig resource type", - resources: []*anypb.Any{{TypeUrl: 
version.V3HTTPConnManagerURL}}, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "badly marshaled routeconfig resource", - resources: []*anypb.Any{ - { - TypeUrl: version.V3RouteConfigURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - { - name: "empty resource list", - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v2 routeConfig resource", - resources: []*anypb.Any{v2RouteConfig}, - wantUpdate: map[string]RouteConfigUpdate{ - v2RouteConfigName: { - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}}}, - }, - }, - Raw: v2RouteConfig, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - name: "v3 routeConfig resource", - resources: []*anypb.Any{v3RouteConfig}, - wantUpdate: map[string]RouteConfigUpdate{ - v3RouteConfigName: { - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}}}, - }, - }, - Raw: v3RouteConfig, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, 
- }, - { - name: "multiple routeConfig resources", - resources: []*anypb.Any{v2RouteConfig, v3RouteConfig}, - wantUpdate: map[string]RouteConfigUpdate{ - v3RouteConfigName: { - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}}}, - }, - }, - Raw: v3RouteConfig, - }, - v2RouteConfigName: { - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}}}, - }, - }, - Raw: v2RouteConfig, - }, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusACKed, - Version: testVersion, - }, - }, - { - // To test that unmarshal keeps processing on errors. 
- name: "good and bad routeConfig resources", - resources: []*anypb.Any{ - v2RouteConfig, - { - TypeUrl: version.V2RouteConfigURL, - Value: func() []byte { - rc := &v3routepb.RouteConfiguration{ - Name: "bad", - VirtualHosts: []*v3routepb.VirtualHost{ - {Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_ConnectMatcher_{}}, - }}}}} - m, _ := proto.Marshal(rc) - return m - }(), - }, - v3RouteConfig, - }, - wantUpdate: map[string]RouteConfigUpdate{ - v3RouteConfigName: { - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}}}, - }, - }, - Raw: v3RouteConfig, - }, - v2RouteConfigName: { - VirtualHosts: []*VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, - }, - { - Domains: []string{ldsTarget}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{v2ClusterName: {Weight: 1}}}}, - }, - }, - Raw: v2RouteConfig, - }, - "bad": {}, - }, - wantMD: UpdateMetadata{ - Status: ServiceStatusNACKed, - Version: testVersion, - ErrState: &UpdateErrorMetadata{ - Version: testVersion, - Err: errPlaceHolder, - }, - }, - wantErr: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - update, md, err := UnmarshalRouteConfig(testVersion, test.resources, nil) - if (err != nil) != test.wantErr { - t.Fatalf("UnmarshalRouteConfig(), got err: %v, wantErr: %v", err, test.wantErr) - } - if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { - t.Errorf("got unexpected update, diff (-got +want): 
%v", diff) - } - if diff := cmp.Diff(md, test.wantMD, cmpOptsIgnoreDetails); diff != "" { - t.Errorf("got unexpected metadata, diff (-got +want): %v", diff) - } - }) - } -} - -func (s) TestRoutesProtoToSlice(t *testing.T) { - var ( - goodRouteWithFilterConfigs = func(cfgs map[string]*anypb.Any) []*v3routepb.Route { - // Sets per-filter config in cluster "B" and in the route. - return []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, - CaseSensitive: &wrapperspb.BoolValue{Value: false}, - }, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ - WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}, TypedPerFilterConfig: cfgs}, - {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, - }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, - }}}}, - TypedPerFilterConfig: cfgs, - }} - } - goodUpdateWithFilterConfigs = func(cfgs map[string]httpfilter.FilterConfig) []*Route { - // Sets per-filter config in cluster "B" and in the route. 
- return []*Route{{ - Prefix: newStringP("/"), - CaseInsensitive: true, - WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60, HTTPFilterConfigOverride: cfgs}}, - HTTPFilterConfigOverride: cfgs, - }} - } - ) - - tests := []struct { - name string - routes []*v3routepb.Route - wantRoutes []*Route - wantErr bool - disableFI bool // disable fault injection - }{ - { - name: "no path", - routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{}, - }}, - wantErr: true, - }, - { - name: "case_sensitive is false", - routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, - CaseSensitive: &wrapperspb.BoolValue{Value: false}, - }, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ - WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, - {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, - }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, - }}}}, - }}, - wantRoutes: []*Route{{ - Prefix: newStringP("/"), - CaseInsensitive: true, - WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, - }}, - }, - { - name: "good", - routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, - Headers: []*v3routepb.HeaderMatcher{ - { - Name: "th", - HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{ - PrefixMatch: "tv", - }, - InvertMatch: true, - }, - }, - RuntimeFraction: &v3corepb.RuntimeFractionalPercent{ - DefaultValue: &v3typepb.FractionalPercent{ - Numerator: 1, - Denominator: v3typepb.FractionalPercent_HUNDRED, - }, - }, - }, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ - WeightedClusters: 
&v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, - {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, - }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, - }}}}, - }, - }, - wantRoutes: []*Route{{ - Prefix: newStringP("/a/"), - Headers: []*HeaderMatcher{ - { - Name: "th", - InvertMatch: newBoolP(true), - PrefixMatch: newStringP("tv"), - }, - }, - Fraction: newUInt32P(10000), - WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, - }}, - wantErr: false, - }, - { - name: "query is ignored", - routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, - }, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ - WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, - {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, - }, - TotalWeight: &wrapperspb.UInt32Value{Value: 100}, - }}}}, - }, - { - Name: "with_query", - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/b/"}, - QueryParameters: []*v3routepb.QueryParameterMatcher{{Name: "route_will_be_ignored"}}, - }, - }, - }, - // Only one route in the result, because the second one with query - // parameters is ignored. 
- wantRoutes: []*Route{{ - Prefix: newStringP("/a/"), - WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, - }}, - wantErr: false, - }, - { - name: "unrecognized path specifier", - routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_ConnectMatcher_{}, - }, - }, - }, - wantErr: true, - }, - { - name: "unrecognized header match specifier", - routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, - Headers: []*v3routepb.HeaderMatcher{ - { - Name: "th", - HeaderMatchSpecifier: &v3routepb.HeaderMatcher_HiddenEnvoyDeprecatedRegexMatch{}, - }, - }, - }, - }, - }, - wantErr: true, - }, - { - name: "no cluster in weighted clusters action", - routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, - }, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ - WeightedClusters: &v3routepb.WeightedCluster{}}}}, - }, - }, - wantErr: true, - }, - { - name: "all 0-weight clusters in weighted clusters action", - routes: []*v3routepb.Route{ - { - Match: &v3routepb.RouteMatch{ - PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, - }, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ - WeightedClusters: &v3routepb.WeightedCluster{ - Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ - {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 0}}, - {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 0}}, - }, - TotalWeight: &wrapperspb.UInt32Value{Value: 0}, - }}}}, - }, - }, - wantErr: true, - }, - { - name: "with custom HTTP filter config", - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": customFilterConfig}), - wantRoutes: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": 
filterConfig{Override: customFilterConfig}}), - }, - { - name: "with custom HTTP filter config in typed struct", - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedCustomFilterTypedStructConfig}), - wantRoutes: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterTypedStructConfig}}), - }, - { - name: "with optional custom HTTP filter config", - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("custom.filter")}), - wantRoutes: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterConfig}}), - }, - { - name: "with custom HTTP filter config, FI disabled", - disableFI: true, - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": customFilterConfig}), - wantRoutes: goodUpdateWithFilterConfigs(nil), - }, - { - name: "with erroring custom HTTP filter config", - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": errFilterConfig}), - wantErr: true, - }, - { - name: "with optional erroring custom HTTP filter config", - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("err.custom.filter")}), - wantErr: true, - }, - { - name: "with erroring custom HTTP filter config, FI disabled", - disableFI: true, - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": errFilterConfig}), - wantRoutes: goodUpdateWithFilterConfigs(nil), - }, - { - name: "with unknown custom HTTP filter config", - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": unknownFilterConfig}), - wantErr: true, - }, - { - name: "with optional unknown custom HTTP filter config", - routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("unknown.custom.filter")}), - wantRoutes: goodUpdateWithFilterConfigs(nil), - }, - } - - cmpOpts := []cmp.Option{ - cmp.AllowUnexported(Route{}, HeaderMatcher{}, Int64Range{}), - cmpopts.EquateEmpty(), - 
cmp.Transformer("FilterConfig", func(fc httpfilter.FilterConfig) string { - return fmt.Sprint(fc) - }), - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - oldFI := env.FaultInjectionSupport - env.FaultInjectionSupport = !tt.disableFI - - got, err := routesProtoToSlice(tt.routes, nil, false) - if (err != nil) != tt.wantErr { - t.Errorf("routesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !cmp.Equal(got, tt.wantRoutes, cmpOpts...) { - t.Errorf("routesProtoToSlice() got = %v, want %v, diff: %v", got, tt.wantRoutes, cmp.Diff(got, tt.wantRoutes, cmpOpts...)) - } - - env.FaultInjectionSupport = oldFI - }) - } -} - -func newStringP(s string) *string { - return &s -} - -func newUInt32P(i uint32) *uint32 { - return &i -} - -func newBoolP(b bool) *bool { - return &b -} - -func newDurationP(d time.Duration) *time.Duration { - return &d -} diff --git a/xds/internal/client/requests_counter.go b/xds/internal/client/requests_counter.go deleted file mode 100644 index 7ef18345ed6c..000000000000 --- a/xds/internal/client/requests_counter.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package client - -import ( - "fmt" - "sync" - "sync/atomic" -) - -type servicesRequestsCounter struct { - mu sync.Mutex - services map[string]*ServiceRequestsCounter -} - -var src = &servicesRequestsCounter{ - services: make(map[string]*ServiceRequestsCounter), -} - -// ServiceRequestsCounter is used to track the total inflight requests for a -// service with the provided name. -type ServiceRequestsCounter struct { - ServiceName string - numRequests uint32 -} - -// GetServiceRequestsCounter returns the ServiceRequestsCounter with the -// provided serviceName. If one does not exist, it creates it. -func GetServiceRequestsCounter(serviceName string) *ServiceRequestsCounter { - src.mu.Lock() - defer src.mu.Unlock() - c, ok := src.services[serviceName] - if !ok { - c = &ServiceRequestsCounter{ServiceName: serviceName} - src.services[serviceName] = c - } - return c -} - -// StartRequest starts a request for a service, incrementing its number of -// requests by 1. Returns an error if the max number of requests is exceeded. -func (c *ServiceRequestsCounter) StartRequest(max uint32) error { - if atomic.LoadUint32(&c.numRequests) >= max { - return fmt.Errorf("max requests %v exceeded on service %v", max, c.ServiceName) - } - atomic.AddUint32(&c.numRequests, 1) - return nil -} - -// EndRequest ends a request for a service, decrementing its number of requests -// by 1. -func (c *ServiceRequestsCounter) EndRequest() { - atomic.AddUint32(&c.numRequests, ^uint32(0)) -} - -// ClearCounterForTesting clears the counter for the service. Should be only -// used in tests. 
-func ClearCounterForTesting(serviceName string) { - src.mu.Lock() - defer src.mu.Unlock() - c, ok := src.services[serviceName] - if !ok { - return - } - c.numRequests = 0 -} diff --git a/xds/internal/client/singleton.go b/xds/internal/client/singleton.go deleted file mode 100644 index 99f195341acd..000000000000 --- a/xds/internal/client/singleton.go +++ /dev/null @@ -1,143 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package client - -import ( - "fmt" - "sync" - "time" - - "google.golang.org/grpc/xds/internal/client/bootstrap" -) - -const defaultWatchExpiryTimeout = 15 * time.Second - -// This is the Client returned by New(). It contains one client implementation, -// and maintains the refcount. -var singletonClient = &Client{} - -// To override in tests. -var bootstrapNewConfig = bootstrap.NewConfig - -// Client is a full fledged gRPC client which queries a set of discovery APIs -// (collectively termed as xDS) on a remote management server, to discover -// various dynamic resources. -// -// The xds client is a singleton. It will be shared by the xds resolver and -// balancer implementations, across multiple ClientConns and Servers. -type Client struct { - *clientImpl - - // This mu protects all the fields, including the embedded clientImpl above. 
- mu sync.Mutex - refCount int -} - -// New returns a new xdsClient configured by the bootstrap file specified in env -// variable GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG. -// -// The returned xdsClient is a singleton. This function creates the xds client -// if it doesn't already exist. -// -// Note that the first invocation of New() or NewWithConfig() sets the client -// singleton. The following calls will return the singleton xds client without -// checking or using the config. -func New() (*Client, error) { - singletonClient.mu.Lock() - defer singletonClient.mu.Unlock() - // If the client implementation was created, increment ref count and return - // the client. - if singletonClient.clientImpl != nil { - singletonClient.refCount++ - return singletonClient, nil - } - - // Create the new client implementation. - config, err := bootstrapNewConfig() - if err != nil { - return nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) - } - c, err := newWithConfig(config, defaultWatchExpiryTimeout) - if err != nil { - return nil, err - } - - singletonClient.clientImpl = c - singletonClient.refCount++ - return singletonClient, nil -} - -// NewWithConfig returns a new xdsClient configured by the given config. -// -// The returned xdsClient is a singleton. This function creates the xds client -// if it doesn't already exist. -// -// Note that the first invocation of New() or NewWithConfig() sets the client -// singleton. The following calls will return the singleton xds client without -// checking or using the config. -// -// This function is internal only, for c2p resolver to use. DO NOT use this -// elsewhere. Use New() instead. -func NewWithConfig(config *bootstrap.Config) (*Client, error) { - singletonClient.mu.Lock() - defer singletonClient.mu.Unlock() - // If the client implementation was created, increment ref count and return - // the client. 
- if singletonClient.clientImpl != nil { - singletonClient.refCount++ - return singletonClient, nil - } - - // Create the new client implementation. - c, err := newWithConfig(config, defaultWatchExpiryTimeout) - if err != nil { - return nil, err - } - - singletonClient.clientImpl = c - singletonClient.refCount++ - return singletonClient, nil -} - -// Close closes the client. It does ref count of the xds client implementation, -// and closes the gRPC connection to the management server when ref count -// reaches 0. -func (c *Client) Close() { - c.mu.Lock() - defer c.mu.Unlock() - c.refCount-- - if c.refCount == 0 { - c.clientImpl.Close() - // Set clientImpl back to nil. So if New() is called after this, a new - // implementation will be created. - c.clientImpl = nil - } -} - -// NewWithConfigForTesting is exported for testing only. -// -// Note that this function doesn't set the singleton, so that the testing states -// don't leak. -func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout time.Duration) (*Client, error) { - cl, err := newWithConfig(config, watchExpiryTimeout) - if err != nil { - return nil, err - } - return &Client{clientImpl: cl, refCount: 1}, nil -} diff --git a/xds/internal/client/tests/README.md b/xds/internal/client/tests/README.md deleted file mode 100644 index 6dc940c103f7..000000000000 --- a/xds/internal/client/tests/README.md +++ /dev/null @@ -1 +0,0 @@ -This package contains tests which cannot live in the `client` package because they need to import one of the API client packages (which itself has a dependency on the `client` package). diff --git a/xds/internal/client/tests/client_test.go b/xds/internal/client/tests/client_test.go deleted file mode 100644 index f5a57fbcd218..000000000000 --- a/xds/internal/client/tests/client_test.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package tests_test - -import ( - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/grpctest" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" - _ "google.golang.org/grpc/xds/internal/client/v2" // Register the v2 API client. - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/version" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -const testXDSServer = "xds-server" - -func (s) TestNew(t *testing.T) { - tests := []struct { - name string - config *bootstrap.Config - wantErr bool - }{ - { - name: "empty-opts", - config: &bootstrap.Config{}, - wantErr: true, - }, - { - name: "empty-balancer-name", - config: &bootstrap.Config{ - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: testutils.EmptyNodeProtoV2, - }, - wantErr: true, - }, - { - name: "empty-dial-creds", - config: &bootstrap.Config{ - BalancerName: testXDSServer, - NodeProto: testutils.EmptyNodeProtoV2, - }, - wantErr: true, - }, - { - name: "empty-node-proto", - config: &bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - }, - wantErr: true, - }, - { - name: "node-proto-version-mismatch", - config: 
&bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: testutils.EmptyNodeProtoV3, - TransportAPI: version.TransportV2, - }, - wantErr: true, - }, - // TODO(easwars): Add cases for v3 API client. - { - name: "happy-case", - config: &bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithInsecure(), - NodeProto: testutils.EmptyNodeProtoV2, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - c, err := xdsclient.NewWithConfigForTesting(test.config, 15*time.Second) - if (err != nil) != test.wantErr { - t.Fatalf("New(%+v) = %v, wantErr: %v", test.config, err, test.wantErr) - } - if c != nil { - c.Close() - } - }) - } -} diff --git a/xds/internal/client/tests/dump_test.go b/xds/internal/client/tests/dump_test.go deleted file mode 100644 index 58220866eb19..000000000000 --- a/xds/internal/client/tests/dump_test.go +++ /dev/null @@ -1,511 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package tests_test - -import ( - "fmt" - "testing" - "time" - - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - "github.com/golang/protobuf/ptypes" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/protobuf/testing/protocmp" - "google.golang.org/protobuf/types/known/anypb" - "google.golang.org/protobuf/types/known/durationpb" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" - xdstestutils "google.golang.org/grpc/xds/internal/testutils" -) - -const defaultTestWatchExpiryTimeout = 500 * time.Millisecond - -func (s) TestLDSConfigDump(t *testing.T) { - const testVersion = "test-version-lds" - var ( - ldsTargets = []string{"lds.target.good:0000", "lds.target.good:1111"} - routeConfigNames = []string{"route-config-0", "route-config-1"} - listenerRaws = make(map[string]*anypb.Any, len(ldsTargets)) - ) - - for i := range ldsTargets { - listenersT := &v3listenerpb.Listener{ - Name: ldsTargets[i], - ApiListener: &v3listenerpb.ApiListener{ - ApiListener: func() *anypb.Any { - mcm, _ := ptypes.MarshalAny(&v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ - Rds: &v3httppb.Rds{ - ConfigSource: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, - }, - RouteConfigName: routeConfigNames[i], - }, - }, - 
CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ - MaxStreamDuration: durationpb.New(time.Second), - }, - }) - return mcm - }(), - }, - } - anyT, err := ptypes.MarshalAny(listenersT) - if err != nil { - t.Fatalf("failed to marshal proto to any: %v", err) - } - listenerRaws[ldsTargets[i]] = anyT - } - - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, defaultTestWatchExpiryTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - // Expected unknown. - if err := compareDump(client.DumpLDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { - t.Fatalf(err.Error()) - } - - wantRequested := make(map[string]xdsclient.UpdateWithMD) - for _, n := range ldsTargets { - cancel := client.WatchListener(n, func(update xdsclient.ListenerUpdate, err error) {}) - defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} - } - // Expected requested. - if err := compareDump(client.DumpLDS, "", wantRequested); err != nil { - t.Fatalf(err.Error()) - } - - update0 := make(map[string]xdsclient.ListenerUpdate) - want0 := make(map[string]xdsclient.UpdateWithMD) - for n, r := range listenerRaws { - update0[n] = xdsclient.ListenerUpdate{Raw: r} - want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, - Raw: r, - } - } - client.NewListeners(update0, xdsclient.UpdateMetadata{Version: testVersion}) - - // Expect ACK. 
- if err := compareDump(client.DumpLDS, testVersion, want0); err != nil { - t.Fatalf(err.Error()) - } - - const nackVersion = "lds-version-nack" - var nackErr = fmt.Errorf("lds nack error") - client.NewListeners( - map[string]xdsclient.ListenerUpdate{ - ldsTargets[0]: {}, - }, - xdsclient.UpdateMetadata{ - ErrState: &xdsclient.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - ) - - // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsclient.UpdateWithMD) - // Though resource 0 was NACKed, the dump should show the previous ACKed raw - // message, as well as the NACK error. - wantDump[ldsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - Raw: listenerRaws[ldsTargets[0]], - } - - wantDump[ldsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, - Raw: listenerRaws[ldsTargets[1]], - } - if err := compareDump(client.DumpLDS, nackVersion, wantDump); err != nil { - t.Fatalf(err.Error()) - } -} - -func (s) TestRDSConfigDump(t *testing.T) { - const testVersion = "test-version-rds" - var ( - listenerNames = []string{"lds.target.good:0000", "lds.target.good:1111"} - rdsTargets = []string{"route-config-0", "route-config-1"} - clusterNames = []string{"cluster-0", "cluster-1"} - routeRaws = make(map[string]*anypb.Any, len(rdsTargets)) - ) - - for i := range rdsTargets { - routeConfigT := &v3routepb.RouteConfiguration{ - Name: rdsTargets[i], - VirtualHosts: []*v3routepb.VirtualHost{ - { - Domains: []string{listenerNames[i]}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &v3routepb.Route_Route{ - Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterNames[i]}, - }, - }, - }}, - }, - }, - } - - anyT, err := 
ptypes.MarshalAny(routeConfigT) - if err != nil { - t.Fatalf("failed to marshal proto to any: %v", err) - } - routeRaws[rdsTargets[i]] = anyT - } - - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, defaultTestWatchExpiryTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - // Expected unknown. - if err := compareDump(client.DumpRDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { - t.Fatalf(err.Error()) - } - - wantRequested := make(map[string]xdsclient.UpdateWithMD) - for _, n := range rdsTargets { - cancel := client.WatchRouteConfig(n, func(update xdsclient.RouteConfigUpdate, err error) {}) - defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} - } - // Expected requested. - if err := compareDump(client.DumpRDS, "", wantRequested); err != nil { - t.Fatalf(err.Error()) - } - - update0 := make(map[string]xdsclient.RouteConfigUpdate) - want0 := make(map[string]xdsclient.UpdateWithMD) - for n, r := range routeRaws { - update0[n] = xdsclient.RouteConfigUpdate{Raw: r} - want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, - Raw: r, - } - } - client.NewRouteConfigs(update0, xdsclient.UpdateMetadata{Version: testVersion}) - - // Expect ACK. - if err := compareDump(client.DumpRDS, testVersion, want0); err != nil { - t.Fatalf(err.Error()) - } - - const nackVersion = "rds-version-nack" - var nackErr = fmt.Errorf("rds nack error") - client.NewRouteConfigs( - map[string]xdsclient.RouteConfigUpdate{ - rdsTargets[0]: {}, - }, - xdsclient.UpdateMetadata{ - ErrState: &xdsclient.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - ) - - // Expect NACK for [0], but old ACK for [1]. 
- wantDump := make(map[string]xdsclient.UpdateWithMD) - // Though resource 0 was NACKed, the dump should show the previous ACKed raw - // message, as well as the NACK error. - wantDump[rdsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - Raw: routeRaws[rdsTargets[0]], - } - wantDump[rdsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, - Raw: routeRaws[rdsTargets[1]], - } - if err := compareDump(client.DumpRDS, nackVersion, wantDump); err != nil { - t.Fatalf(err.Error()) - } -} - -func (s) TestCDSConfigDump(t *testing.T) { - const testVersion = "test-version-cds" - var ( - cdsTargets = []string{"cluster-0", "cluster-1"} - serviceNames = []string{"service-0", "service-1"} - clusterRaws = make(map[string]*anypb.Any, len(cdsTargets)) - ) - - for i := range cdsTargets { - clusterT := &v3clusterpb.Cluster{ - Name: cdsTargets[i], - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceNames[i], - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - LrsServer: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ - Self: &v3corepb.SelfConfigSource{}, - }, - }, - } - - anyT, err := ptypes.MarshalAny(clusterT) - if err != nil { - t.Fatalf("failed to marshal proto to any: %v", err) - } - clusterRaws[cdsTargets[i]] = anyT - } - - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, defaultTestWatchExpiryTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", 
err) - } - defer client.Close() - - // Expected unknown. - if err := compareDump(client.DumpCDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { - t.Fatalf(err.Error()) - } - - wantRequested := make(map[string]xdsclient.UpdateWithMD) - for _, n := range cdsTargets { - cancel := client.WatchCluster(n, func(update xdsclient.ClusterUpdate, err error) {}) - defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} - } - // Expected requested. - if err := compareDump(client.DumpCDS, "", wantRequested); err != nil { - t.Fatalf(err.Error()) - } - - update0 := make(map[string]xdsclient.ClusterUpdate) - want0 := make(map[string]xdsclient.UpdateWithMD) - for n, r := range clusterRaws { - update0[n] = xdsclient.ClusterUpdate{Raw: r} - want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, - Raw: r, - } - } - client.NewClusters(update0, xdsclient.UpdateMetadata{Version: testVersion}) - - // Expect ACK. - if err := compareDump(client.DumpCDS, testVersion, want0); err != nil { - t.Fatalf(err.Error()) - } - - const nackVersion = "cds-version-nack" - var nackErr = fmt.Errorf("cds nack error") - client.NewClusters( - map[string]xdsclient.ClusterUpdate{ - cdsTargets[0]: {}, - }, - xdsclient.UpdateMetadata{ - ErrState: &xdsclient.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - ) - - // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsclient.UpdateWithMD) - // Though resource 0 was NACKed, the dump should show the previous ACKed raw - // message, as well as the NACK error. 
- wantDump[cdsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - Raw: clusterRaws[cdsTargets[0]], - } - wantDump[cdsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, - Raw: clusterRaws[cdsTargets[1]], - } - if err := compareDump(client.DumpCDS, nackVersion, wantDump); err != nil { - t.Fatalf(err.Error()) - } -} - -func (s) TestEDSConfigDump(t *testing.T) { - const testVersion = "test-version-cds" - var ( - edsTargets = []string{"cluster-0", "cluster-1"} - localityNames = []string{"locality-0", "locality-1"} - addrs = []string{"addr0:123", "addr1:456"} - endpointRaws = make(map[string]*anypb.Any, len(edsTargets)) - ) - - for i := range edsTargets { - clab0 := xdstestutils.NewClusterLoadAssignmentBuilder(edsTargets[i], nil) - clab0.AddLocality(localityNames[i], 1, 1, []string{addrs[i]}, nil) - claT := clab0.Build() - - anyT, err := ptypes.MarshalAny(claT) - if err != nil { - t.Fatalf("failed to marshal proto to any: %v", err) - } - endpointRaws[edsTargets[i]] = anyT - } - - client, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ - BalancerName: testXDSServer, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: xdstestutils.EmptyNodeProtoV2, - }, defaultTestWatchExpiryTimeout) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - // Expected unknown. 
- if err := compareDump(client.DumpEDS, "", map[string]xdsclient.UpdateWithMD{}); err != nil { - t.Fatalf(err.Error()) - } - - wantRequested := make(map[string]xdsclient.UpdateWithMD) - for _, n := range edsTargets { - cancel := client.WatchEndpoints(n, func(update xdsclient.EndpointsUpdate, err error) {}) - defer cancel() - wantRequested[n] = xdsclient.UpdateWithMD{MD: xdsclient.UpdateMetadata{Status: xdsclient.ServiceStatusRequested}} - } - // Expected requested. - if err := compareDump(client.DumpEDS, "", wantRequested); err != nil { - t.Fatalf(err.Error()) - } - - update0 := make(map[string]xdsclient.EndpointsUpdate) - want0 := make(map[string]xdsclient.UpdateWithMD) - for n, r := range endpointRaws { - update0[n] = xdsclient.EndpointsUpdate{Raw: r} - want0[n] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, - Raw: r, - } - } - client.NewEndpoints(update0, xdsclient.UpdateMetadata{Version: testVersion}) - - // Expect ACK. - if err := compareDump(client.DumpEDS, testVersion, want0); err != nil { - t.Fatalf(err.Error()) - } - - const nackVersion = "eds-version-nack" - var nackErr = fmt.Errorf("eds nack error") - client.NewEndpoints( - map[string]xdsclient.EndpointsUpdate{ - edsTargets[0]: {}, - }, - xdsclient.UpdateMetadata{ - ErrState: &xdsclient.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - ) - - // Expect NACK for [0], but old ACK for [1]. - wantDump := make(map[string]xdsclient.UpdateWithMD) - // Though resource 0 was NACKed, the dump should show the previous ACKed raw - // message, as well as the NACK error. 
- wantDump[edsTargets[0]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{ - Version: testVersion, - ErrState: &xdsclient.UpdateErrorMetadata{ - Version: nackVersion, - Err: nackErr, - }, - }, - Raw: endpointRaws[edsTargets[0]], - } - wantDump[edsTargets[1]] = xdsclient.UpdateWithMD{ - MD: xdsclient.UpdateMetadata{Version: testVersion}, - Raw: endpointRaws[edsTargets[1]], - } - if err := compareDump(client.DumpEDS, nackVersion, wantDump); err != nil { - t.Fatalf(err.Error()) - } -} - -func compareDump(dumpFunc func() (string, map[string]xdsclient.UpdateWithMD), wantVersion string, wantDump interface{}) error { - v, dump := dumpFunc() - if v != wantVersion { - return fmt.Errorf("Dump() returned version %q, want %q", v, wantVersion) - } - cmpOpts := cmp.Options{ - cmpopts.EquateEmpty(), - cmp.Comparer(func(a, b time.Time) bool { return true }), - cmp.Comparer(func(x, y error) bool { - if x == nil || y == nil { - return x == nil && y == nil - } - return x.Error() == y.Error() - }), - protocmp.Transform(), - } - if diff := cmp.Diff(dump, wantDump, cmpOpts); diff != "" { - return fmt.Errorf("Dump() returned unexpected dump, diff (-got +want): %s", diff) - } - return nil -} diff --git a/xds/internal/client/transport_helper.go b/xds/internal/client/transport_helper.go deleted file mode 100644 index b286a61d638b..000000000000 --- a/xds/internal/client/transport_helper.go +++ /dev/null @@ -1,508 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package client - -import ( - "context" - "sync" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/xds/internal/client/load" - - "google.golang.org/grpc" - "google.golang.org/grpc/internal/buffer" - "google.golang.org/grpc/internal/grpclog" -) - -// ErrResourceTypeUnsupported is an error used to indicate an unsupported xDS -// resource type. The wrapped ErrStr contains the details. -type ErrResourceTypeUnsupported struct { - ErrStr string -} - -// Error helps implements the error interface. -func (e ErrResourceTypeUnsupported) Error() string { - return e.ErrStr -} - -// VersionedClient is the interface to be provided by the transport protocol -// specific client implementations. This mainly deals with the actual sending -// and receiving of messages. -type VersionedClient interface { - // NewStream returns a new xDS client stream specific to the underlying - // transport protocol version. - NewStream(ctx context.Context) (grpc.ClientStream, error) - - // SendRequest constructs and sends out a DiscoveryRequest message specific - // to the underlying transport protocol version. - SendRequest(s grpc.ClientStream, resourceNames []string, rType ResourceType, version, nonce, errMsg string) error - - // RecvResponse uses the provided stream to receive a response specific to - // the underlying transport protocol version. - RecvResponse(s grpc.ClientStream) (proto.Message, error) - - // HandleResponse parses and validates the received response and notifies - // the top-level client which in turn notifies the registered watchers. - // - // Return values are: resourceType, version, nonce, error. - // If the provided protobuf message contains a resource type which is not - // supported, implementations must return an error of type - // ErrResourceTypeUnsupported. 
- HandleResponse(proto.Message) (ResourceType, string, string, error) - - // NewLoadStatsStream returns a new LRS client stream specific to the underlying - // transport protocol version. - NewLoadStatsStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) - - // SendFirstLoadStatsRequest constructs and sends the first request on the - // LRS stream. - SendFirstLoadStatsRequest(s grpc.ClientStream) error - - // HandleLoadStatsResponse receives the first response from the server which - // contains the load reporting interval and the clusters for which the - // server asks the client to report load for. - // - // If the response sets SendAllClusters to true, the returned clusters is - // nil. - HandleLoadStatsResponse(s grpc.ClientStream) (clusters []string, _ time.Duration, _ error) - - // SendLoadStatsRequest will be invoked at regular intervals to send load - // report with load data reported since the last time this method was - // invoked. - SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) error -} - -// TransportHelper contains all xDS transport protocol related functionality -// which is common across different versioned client implementations. -// -// TransportHelper takes care of sending and receiving xDS requests and -// responses on an ADS stream. It also takes care of ACK/NACK handling. It -// delegates to the actual versioned client implementations wherever -// appropriate. -// -// Implements the APIClient interface which makes it possible for versioned -// client implementations to embed this type, and thereby satisfy the interface -// requirements. -type TransportHelper struct { - cancelCtx context.CancelFunc - - vClient VersionedClient - logger *grpclog.PrefixLogger - backoff func(int) time.Duration - streamCh chan grpc.ClientStream - sendCh *buffer.Unbounded - - mu sync.Mutex - // Message specific watch infos, protected by the above mutex. 
These are - // written to, after successfully reading from the update channel, and are - // read from when recovering from a broken stream to resend the xDS - // messages. When the user of this client object cancels a watch call, - // these are set to nil. All accesses to the map protected and any value - // inside the map should be protected with the above mutex. - watchMap map[ResourceType]map[string]bool - // versionMap contains the version that was acked (the version in the ack - // request that was sent on wire). The key is rType, the value is the - // version string, becaues the versions for different resource types should - // be independent. - versionMap map[ResourceType]string - // nonceMap contains the nonce from the most recent received response. - nonceMap map[ResourceType]string -} - -// NewTransportHelper creates a new transport helper to be used by versioned -// client implementations. -func NewTransportHelper(vc VersionedClient, logger *grpclog.PrefixLogger, backoff func(int) time.Duration) *TransportHelper { - ctx, cancelCtx := context.WithCancel(context.Background()) - t := &TransportHelper{ - cancelCtx: cancelCtx, - vClient: vc, - logger: logger, - backoff: backoff, - - streamCh: make(chan grpc.ClientStream, 1), - sendCh: buffer.NewUnbounded(), - watchMap: make(map[ResourceType]map[string]bool), - versionMap: make(map[ResourceType]string), - nonceMap: make(map[ResourceType]string), - } - - go t.run(ctx) - return t -} - -// AddWatch adds a watch for an xDS resource given its type and name. -func (t *TransportHelper) AddWatch(rType ResourceType, resourceName string) { - t.sendCh.Put(&watchAction{ - rType: rType, - remove: false, - resource: resourceName, - }) -} - -// RemoveWatch cancels an already registered watch for an xDS resource -// given its type and name. 
-func (t *TransportHelper) RemoveWatch(rType ResourceType, resourceName string) { - t.sendCh.Put(&watchAction{ - rType: rType, - remove: true, - resource: resourceName, - }) -} - -// Close closes the transport helper. -func (t *TransportHelper) Close() { - t.cancelCtx() -} - -// run starts an ADS stream (and backs off exponentially, if the previous -// stream failed without receiving a single reply) and runs the sender and -// receiver routines to send and receive data from the stream respectively. -func (t *TransportHelper) run(ctx context.Context) { - go t.send(ctx) - // TODO: start a goroutine monitoring ClientConn's connectivity state, and - // report error (and log) when stats is transient failure. - - retries := 0 - for { - select { - case <-ctx.Done(): - return - default: - } - - if retries != 0 { - timer := time.NewTimer(t.backoff(retries)) - select { - case <-timer.C: - case <-ctx.Done(): - if !timer.Stop() { - <-timer.C - } - return - } - } - - retries++ - stream, err := t.vClient.NewStream(ctx) - if err != nil { - t.logger.Warningf("xds: ADS stream creation failed: %v", err) - continue - } - t.logger.Infof("ADS stream created") - - select { - case <-t.streamCh: - default: - } - t.streamCh <- stream - if t.recv(stream) { - retries = 0 - } - } -} - -// send is a separate goroutine for sending watch requests on the xds stream. -// -// It watches the stream channel for new streams, and the request channel for -// new requests to send on the stream. -// -// For each new request (watchAction), it's -// - processed and added to the watch map -// - so resend will pick them up when there are new streams -// - sent on the current stream if there's one -// - the current stream is cleared when any send on it fails -// -// For each new stream, all the existing requests will be resent. -// -// Note that this goroutine doesn't do anything to the old stream when there's a -// new one. 
In fact, there should be only one stream in progress, and new one -// should only be created when the old one fails (recv returns an error). -func (t *TransportHelper) send(ctx context.Context) { - var stream grpc.ClientStream - for { - select { - case <-ctx.Done(): - return - case stream = <-t.streamCh: - if !t.sendExisting(stream) { - // send failed, clear the current stream. - stream = nil - } - case u := <-t.sendCh.Get(): - t.sendCh.Load() - - var ( - target []string - rType ResourceType - version, nonce, errMsg string - send bool - ) - switch update := u.(type) { - case *watchAction: - target, rType, version, nonce = t.processWatchInfo(update) - case *ackAction: - target, rType, version, nonce, send = t.processAckInfo(update, stream) - if !send { - continue - } - errMsg = update.errMsg - } - if stream == nil { - // There's no stream yet. Skip the request. This request - // will be resent to the new streams. If no stream is - // created, the watcher will timeout (same as server not - // sending response back). - continue - } - if err := t.vClient.SendRequest(stream, target, rType, version, nonce, errMsg); err != nil { - t.logger.Warningf("ADS request for {target: %q, type: %v, version: %q, nonce: %q} failed: %v", target, rType, version, nonce, err) - // send failed, clear the current stream. - stream = nil - } - } - } -} - -// sendExisting sends out xDS requests for registered watchers when recovering -// from a broken stream. -// -// We call stream.Send() here with the lock being held. It should be OK to do -// that here because the stream has just started and Send() usually returns -// quickly (once it pushes the message onto the transport layer) and is only -// ever blocked if we don't have enough flow control quota. -func (t *TransportHelper) sendExisting(stream grpc.ClientStream) bool { - t.mu.Lock() - defer t.mu.Unlock() - - // Reset the ack versions when the stream restarts. 
- t.versionMap = make(map[ResourceType]string) - t.nonceMap = make(map[ResourceType]string) - - for rType, s := range t.watchMap { - if err := t.vClient.SendRequest(stream, mapToSlice(s), rType, "", "", ""); err != nil { - t.logger.Errorf("ADS request failed: %v", err) - return false - } - } - - return true -} - -// recv receives xDS responses on the provided ADS stream and branches out to -// message specific handlers. -func (t *TransportHelper) recv(stream grpc.ClientStream) bool { - success := false - for { - resp, err := t.vClient.RecvResponse(stream) - if err != nil { - t.logger.Warningf("ADS stream is closed with error: %v", err) - return success - } - rType, version, nonce, err := t.vClient.HandleResponse(resp) - if e, ok := err.(ErrResourceTypeUnsupported); ok { - t.logger.Warningf("%s", e.ErrStr) - continue - } - if err != nil { - t.sendCh.Put(&ackAction{ - rType: rType, - version: "", - nonce: nonce, - errMsg: err.Error(), - stream: stream, - }) - t.logger.Warningf("Sending NACK for response type: %v, version: %v, nonce: %v, reason: %v", rType, version, nonce, err) - continue - } - t.sendCh.Put(&ackAction{ - rType: rType, - version: version, - nonce: nonce, - stream: stream, - }) - t.logger.Infof("Sending ACK for response type: %v, version: %v, nonce: %v", rType, version, nonce) - success = true - } -} - -func mapToSlice(m map[string]bool) (ret []string) { - for i := range m { - ret = append(ret, i) - } - return -} - -type watchAction struct { - rType ResourceType - remove bool // Whether this is to remove watch for the resource. - resource string -} - -// processWatchInfo pulls the fields needed by the request from a watchAction. -// -// It also updates the watch map. 
-func (t *TransportHelper) processWatchInfo(w *watchAction) (target []string, rType ResourceType, ver, nonce string) { - t.mu.Lock() - defer t.mu.Unlock() - - var current map[string]bool - current, ok := t.watchMap[w.rType] - if !ok { - current = make(map[string]bool) - t.watchMap[w.rType] = current - } - - if w.remove { - delete(current, w.resource) - if len(current) == 0 { - delete(t.watchMap, w.rType) - } - } else { - current[w.resource] = true - } - - rType = w.rType - target = mapToSlice(current) - // We don't reset version or nonce when a new watch is started. The version - // and nonce from previous response are carried by the request unless the - // stream is recreated. - ver = t.versionMap[rType] - nonce = t.nonceMap[rType] - return target, rType, ver, nonce -} - -type ackAction struct { - rType ResourceType - version string // NACK if version is an empty string. - nonce string - errMsg string // Empty unless it's a NACK. - // ACK/NACK are tagged with the stream it's for. When the stream is down, - // all the ACK/NACK for this stream will be dropped, and the version/nonce - // won't be updated. - stream grpc.ClientStream -} - -// processAckInfo pulls the fields needed by the ack request from a ackAction. -// -// If no active watch is found for this ack, it returns false for send. -func (t *TransportHelper) processAckInfo(ack *ackAction, stream grpc.ClientStream) (target []string, rType ResourceType, version, nonce string, send bool) { - if ack.stream != stream { - // If ACK's stream isn't the current sending stream, this means the ACK - // was pushed to queue before the old stream broke, and a new stream has - // been started since. Return immediately here so we don't update the - // nonce for the new stream. - return nil, UnknownResource, "", "", false - } - rType = ack.rType - - t.mu.Lock() - defer t.mu.Unlock() - - // Update the nonce no matter if we are going to send the ACK request on - // wire. We may not send the request if the watch is canceled. 
But the nonce - // needs to be updated so the next request will have the right nonce. - nonce = ack.nonce - t.nonceMap[rType] = nonce - - s, ok := t.watchMap[rType] - if !ok || len(s) == 0 { - // We don't send the request ack if there's no active watch (this can be - // either the server sends responses before any request, or the watch is - // canceled while the ackAction is in queue), because there's no resource - // name. And if we send a request with empty resource name list, the - // server may treat it as a wild card and send us everything. - return nil, UnknownResource, "", "", false - } - send = true - target = mapToSlice(s) - - version = ack.version - if version == "" { - // This is a nack, get the previous acked version. - version = t.versionMap[rType] - // version will still be an empty string if rType isn't - // found in versionMap, this can happen if there wasn't any ack - // before. - } else { - t.versionMap[rType] = version - } - return target, rType, version, nonce, send -} - -// reportLoad starts an LRS stream to report load data to the management server. -// It blocks until the context is cancelled. 
-func (t *TransportHelper) reportLoad(ctx context.Context, cc *grpc.ClientConn, opts loadReportingOptions) { - retries := 0 - for { - if ctx.Err() != nil { - return - } - - if retries != 0 { - timer := time.NewTimer(t.backoff(retries)) - select { - case <-timer.C: - case <-ctx.Done(): - if !timer.Stop() { - <-timer.C - } - return - } - } - - retries++ - stream, err := t.vClient.NewLoadStatsStream(ctx, cc) - if err != nil { - logger.Warningf("lrs: failed to create stream: %v", err) - continue - } - logger.Infof("lrs: created LRS stream") - - if err := t.vClient.SendFirstLoadStatsRequest(stream); err != nil { - logger.Warningf("lrs: failed to send first request: %v", err) - continue - } - - clusters, interval, err := t.vClient.HandleLoadStatsResponse(stream) - if err != nil { - logger.Warning(err) - continue - } - - retries = 0 - t.sendLoads(ctx, stream, opts.loadStore, clusters, interval) - } -} - -func (t *TransportHelper) sendLoads(ctx context.Context, stream grpc.ClientStream, store *load.Store, clusterNames []string, interval time.Duration) { - tick := time.NewTicker(interval) - defer tick.Stop() - for { - select { - case <-tick.C: - case <-ctx.Done(): - return - } - if err := t.vClient.SendLoadStatsRequest(stream, store.Stats(clusterNames)); err != nil { - logger.Warning(err) - return - } - } -} diff --git a/xds/internal/client/v2/ack_test.go b/xds/internal/client/v2/ack_test.go deleted file mode 100644 index 813d8baa79d9..000000000000 --- a/xds/internal/client/v2/ack_test.go +++ /dev/null @@ -1,484 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package v2 - -import ( - "context" - "fmt" - "strconv" - "testing" - "time" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - "github.com/golang/protobuf/proto" - anypb "github.com/golang/protobuf/ptypes/any" - "github.com/google/go-cmp/cmp" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/testutils" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/version" -) - -const ( - defaultTestTimeout = 5 * time.Second - defaultTestShortTimeout = 10 * time.Millisecond -) - -func startXDSV2Client(t *testing.T, cc *grpc.ClientConn) (v2c *client, cbLDS, cbRDS, cbCDS, cbEDS *testutils.Channel, cleanup func()) { - cbLDS = testutils.NewChannel() - cbRDS = testutils.NewChannel() - cbCDS = testutils.NewChannel() - cbEDS = testutils.NewChannel() - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { - t.Logf("Received %v callback with {%+v}", rType, d) - switch rType { - case xdsclient.ListenerResource: - if _, ok := d[goodLDSTarget1]; ok { - cbLDS.Send(struct{}{}) - } - case xdsclient.RouteConfigResource: - if _, ok := d[goodRouteName1]; ok { - cbRDS.Send(struct{}{}) - } - case xdsclient.ClusterResource: - if _, ok := d[goodClusterName1]; ok { - cbCDS.Send(struct{}{}) - } - case xdsclient.EndpointsResource: - if _, ok := d[goodEDSName]; ok { - 
cbEDS.Send(struct{}{}) - } - } - }, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - t.Log("Started xds client...") - return v2c, cbLDS, cbRDS, cbCDS, cbEDS, v2c.Close -} - -// compareXDSRequest reads requests from channel, compare it with want. -func compareXDSRequest(ctx context.Context, ch *testutils.Channel, want *xdspb.DiscoveryRequest, ver, nonce string, wantErr bool) error { - val, err := ch.Receive(ctx) - if err != nil { - return err - } - req := val.(*fakeserver.Request) - if req.Err != nil { - return fmt.Errorf("unexpected error from request: %v", req.Err) - } - - xdsReq := req.Req.(*xdspb.DiscoveryRequest) - if (xdsReq.ErrorDetail != nil) != wantErr { - return fmt.Errorf("received request with error details: %v, wantErr: %v", xdsReq.ErrorDetail, wantErr) - } - // All NACK request.ErrorDetails have hardcoded status code InvalidArguments. - if xdsReq.ErrorDetail != nil && xdsReq.ErrorDetail.Code != int32(codes.InvalidArgument) { - return fmt.Errorf("received request with error details: %v, want status with code: %v", xdsReq.ErrorDetail, codes.InvalidArgument) - } - - xdsReq.ErrorDetail = nil // Clear the error details field before comparing. 
- wantClone := proto.Clone(want).(*xdspb.DiscoveryRequest) - wantClone.VersionInfo = ver - wantClone.ResponseNonce = nonce - if !cmp.Equal(xdsReq, wantClone, cmp.Comparer(proto.Equal)) { - return fmt.Errorf("received request different from want, diff: %s", cmp.Diff(req.Req, wantClone, cmp.Comparer(proto.Equal))) - } - return nil -} - -func sendXDSRespWithVersion(ch chan<- *fakeserver.Response, respWithoutVersion *xdspb.DiscoveryResponse, ver int) (nonce string) { - respToSend := proto.Clone(respWithoutVersion).(*xdspb.DiscoveryResponse) - respToSend.VersionInfo = strconv.Itoa(ver) - nonce = strconv.Itoa(int(time.Now().UnixNano())) - respToSend.Nonce = nonce - ch <- &fakeserver.Response{Resp: respToSend} - return -} - -// startXDS calls watch to send the first request. It then sends a good response -// and checks for ack. -func startXDS(ctx context.Context, t *testing.T, rType xdsclient.ResourceType, v2c *client, reqChan *testutils.Channel, req *xdspb.DiscoveryRequest, preVersion string, preNonce string) { - nameToWatch := "" - switch rType { - case xdsclient.ListenerResource: - nameToWatch = goodLDSTarget1 - case xdsclient.RouteConfigResource: - nameToWatch = goodRouteName1 - case xdsclient.ClusterResource: - nameToWatch = goodClusterName1 - case xdsclient.EndpointsResource: - nameToWatch = goodEDSName - } - v2c.AddWatch(rType, nameToWatch) - - if err := compareXDSRequest(ctx, reqChan, req, preVersion, preNonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", rType, err) - } - t.Logf("FakeServer received %v request...", rType) -} - -// sendGoodResp sends the good response, with the given version, and a random -// nonce. -// -// It also waits and checks that the ack request contains the given version, and -// the generated nonce. 
-func sendGoodResp(ctx context.Context, t *testing.T, rType xdsclient.ResourceType, fakeServer *fakeserver.Server, ver int, goodResp *xdspb.DiscoveryResponse, wantReq *xdspb.DiscoveryRequest, callbackCh *testutils.Channel) (string, error) { - nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, goodResp, ver) - t.Logf("Good %v response pushed to fakeServer...", rType) - - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, wantReq, strconv.Itoa(ver), nonce, false); err != nil { - return "", fmt.Errorf("failed to receive %v request: %v", rType, err) - } - t.Logf("Good %v response acked", rType) - - if _, err := callbackCh.Receive(ctx); err != nil { - return "", fmt.Errorf("timeout when expecting %v update", rType) - } - t.Logf("Good %v response callback executed", rType) - return nonce, nil -} - -// sendBadResp sends a bad response with the given version. This response will -// be nacked, so we expect a request with the previous version (version-1). -// -// But the nonce in request should be the new nonce. 
-func sendBadResp(ctx context.Context, t *testing.T, rType xdsclient.ResourceType, fakeServer *fakeserver.Server, ver int, wantReq *xdspb.DiscoveryRequest) error { - var typeURL string - switch rType { - case xdsclient.ListenerResource: - typeURL = version.V2ListenerURL - case xdsclient.RouteConfigResource: - typeURL = version.V2RouteConfigURL - case xdsclient.ClusterResource: - typeURL = version.V2ClusterURL - case xdsclient.EndpointsResource: - typeURL = version.V2EndpointsURL - } - nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{{}}, - TypeUrl: typeURL, - }, ver) - t.Logf("Bad %v response pushed to fakeServer...", rType) - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, wantReq, strconv.Itoa(ver-1), nonce, true); err != nil { - return fmt.Errorf("failed to receive %v request: %v", rType, err) - } - t.Logf("Bad %v response nacked", rType) - return nil -} - -// TestV2ClientAck verifies that valid responses are acked, and invalid ones -// are nacked. -// -// This test also verifies the version for different types are independent. -func (s) TestV2ClientAck(t *testing.T) { - var ( - versionLDS = 1000 - versionRDS = 2000 - versionCDS = 3000 - versionEDS = 4000 - ) - - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - v2c, cbLDS, cbRDS, cbCDS, cbEDS, v2cCleanup := startXDSV2Client(t, cc) - defer v2cCleanup() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start the watch, send a good response, and check for ack. 
- startXDS(ctx, t, xdsclient.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { - t.Fatal(err) - } - versionLDS++ - startXDS(ctx, t, xdsclient.RouteConfigResource, v2c, fakeServer.XDSRequestChan, goodRDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsclient.RouteConfigResource, fakeServer, versionRDS, goodRDSResponse1, goodRDSRequest, cbRDS); err != nil { - t.Fatal(err) - } - versionRDS++ - startXDS(ctx, t, xdsclient.ClusterResource, v2c, fakeServer.XDSRequestChan, goodCDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { - t.Fatal(err) - } - versionCDS++ - startXDS(ctx, t, xdsclient.EndpointsResource, v2c, fakeServer.XDSRequestChan, goodEDSRequest, "", "") - if _, err := sendGoodResp(ctx, t, xdsclient.EndpointsResource, fakeServer, versionEDS, goodEDSResponse1, goodEDSRequest, cbEDS); err != nil { - t.Fatal(err) - } - versionEDS++ - - // Send a bad response, and check for nack. - if err := sendBadResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSRequest); err != nil { - t.Fatal(err) - } - versionLDS++ - if err := sendBadResp(ctx, t, xdsclient.RouteConfigResource, fakeServer, versionRDS, goodRDSRequest); err != nil { - t.Fatal(err) - } - versionRDS++ - if err := sendBadResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { - t.Fatal(err) - } - versionCDS++ - if err := sendBadResp(ctx, t, xdsclient.EndpointsResource, fakeServer, versionEDS, goodEDSRequest); err != nil { - t.Fatal(err) - } - versionEDS++ - - // send another good response, and check for ack, with the new version. 
- if _, err := sendGoodResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { - t.Fatal(err) - } - versionLDS++ - if _, err := sendGoodResp(ctx, t, xdsclient.RouteConfigResource, fakeServer, versionRDS, goodRDSResponse1, goodRDSRequest, cbRDS); err != nil { - t.Fatal(err) - } - versionRDS++ - if _, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { - t.Fatal(err) - } - versionCDS++ - if _, err := sendGoodResp(ctx, t, xdsclient.EndpointsResource, fakeServer, versionEDS, goodEDSResponse1, goodEDSRequest, cbEDS); err != nil { - t.Fatal(err) - } - versionEDS++ -} - -// Test when the first response is invalid, and is nacked, the nack requests -// should have an empty version string. -func (s) TestV2ClientAckFirstIsNack(t *testing.T) { - var versionLDS = 1000 - - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - v2c, cbLDS, _, _, _, v2cCleanup := startXDSV2Client(t, cc) - defer v2cCleanup() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start the watch, send a good response, and check for ack. - startXDS(ctx, t, xdsclient.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") - - nonce := sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{{}}, - TypeUrl: version.V2ListenerURL, - }, versionLDS) - t.Logf("Bad response pushed to fakeServer...") - - // The expected version string is an empty string, because this is the first - // response, and it's nacked (so there's no previous ack version). 
- if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodLDSRequest, "", nonce, true); err != nil { - t.Errorf("Failed to receive request: %v", err) - } - t.Logf("Bad response nacked") - versionLDS++ - - sendGoodResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) - versionLDS++ -} - -// Test when a nack is sent after a new watch, we nack with the previous acked -// version (instead of resetting to empty string). -func (s) TestV2ClientAckNackAfterNewWatch(t *testing.T) { - var versionLDS = 1000 - - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - v2c, cbLDS, _, _, _, v2cCleanup := startXDSV2Client(t, cc) - defer v2cCleanup() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start the watch, send a good response, and check for ack. - startXDS(ctx, t, xdsclient.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, "", "") - nonce, err := sendGoodResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS) - if err != nil { - t.Fatal(err) - } - // Start a new watch. The version in the new request should be the version - // from the previous response, thus versionLDS before ++. - startXDS(ctx, t, xdsclient.ListenerResource, v2c, fakeServer.XDSRequestChan, goodLDSRequest, strconv.Itoa(versionLDS), nonce) - versionLDS++ - - // This is an invalid response after the new watch. - nonce = sendXDSRespWithVersion(fakeServer.XDSResponseChan, &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{{}}, - TypeUrl: version.V2ListenerURL, - }, versionLDS) - t.Logf("Bad response pushed to fakeServer...") - - // The expected version string is the previous acked version. 
- if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodLDSRequest, strconv.Itoa(versionLDS-1), nonce, true); err != nil { - t.Errorf("Failed to receive request: %v", err) - } - t.Logf("Bad response nacked") - versionLDS++ - - if _, err := sendGoodResp(ctx, t, xdsclient.ListenerResource, fakeServer, versionLDS, goodLDSResponse1, goodLDSRequest, cbLDS); err != nil { - t.Fatal(err) - } - versionLDS++ -} - -// TestV2ClientAckNewWatchAfterCancel verifies the new request for a new watch -// after the previous watch is canceled, has the right version. -func (s) TestV2ClientAckNewWatchAfterCancel(t *testing.T) { - var versionCDS = 3000 - - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - v2c, _, _, cbCDS, _, v2cCleanup := startXDSV2Client(t, cc) - defer v2cCleanup() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start a CDS watch. - v2c.AddWatch(xdsclient.ClusterResource, goodClusterName1) - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, "", "", false); err != nil { - t.Fatal(err) - } - t.Logf("FakeServer received %v request...", xdsclient.ClusterResource) - - // Send a good CDS response, this function waits for the ACK with the right - // version. - nonce, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS) - if err != nil { - t.Fatal(err) - } - // Cancel the CDS watch, and start a new one. The new watch should have the - // version from the response above. - v2c.RemoveWatch(xdsclient.ClusterResource, goodClusterName1) - // Wait for a request with no resource names, because the only watch was - // removed. 
- emptyReq := &xdspb.DiscoveryRequest{Node: goodNodeProto, TypeUrl: version.V2ClusterURL} - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, emptyReq, strconv.Itoa(versionCDS), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsclient.ClusterResource, err) - } - v2c.AddWatch(xdsclient.ClusterResource, goodClusterName1) - // Wait for a request with correct resource names and version. - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, strconv.Itoa(versionCDS), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsclient.ClusterResource, err) - } - versionCDS++ - - // Send a bad response with the next version. - if err := sendBadResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { - t.Fatal(err) - } - versionCDS++ - - // send another good response, and check for ack, with the new version. - if _, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { - t.Fatal(err) - } - versionCDS++ -} - -// TestV2ClientAckCancelResponseRace verifies if the response and ACK request -// race with cancel (which means the ACK request will not be sent on wire, -// because there's no active watch), the nonce will still be updated, and the -// new request with the new watch will have the correct nonce. -func (s) TestV2ClientAckCancelResponseRace(t *testing.T) { - var versionCDS = 3000 - - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - v2c, _, _, cbCDS, _, v2cCleanup := startXDSV2Client(t, cc) - defer v2cCleanup() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - - // Start a CDS watch. 
- v2c.AddWatch(xdsclient.ClusterResource, goodClusterName1) - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, "", "", false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsclient.ClusterResource, err) - } - t.Logf("FakeServer received %v request...", xdsclient.ClusterResource) - - // send a good response, and check for ack, with the new version. - nonce, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS) - if err != nil { - t.Fatal(err) - } - // Cancel the watch before the next response is sent. This mimics the case - // watch is canceled while response is on wire. - v2c.RemoveWatch(xdsclient.ClusterResource, goodClusterName1) - // Wait for a request with no resource names, because the only watch was - // removed. - emptyReq := &xdspb.DiscoveryRequest{Node: goodNodeProto, TypeUrl: version.V2ClusterURL} - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, emptyReq, strconv.Itoa(versionCDS), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsclient.ClusterResource, err) - } - versionCDS++ - - sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if req, err := fakeServer.XDSRequestChan.Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("Got unexpected xds request after watch is canceled: %v", req) - } - - // Send a good response. - nonce = sendXDSRespWithVersion(fakeServer.XDSResponseChan, goodCDSResponse1, versionCDS) - t.Logf("Good %v response pushed to fakeServer...", xdsclient.ClusterResource) - - // Expect no ACK because watch was canceled. 
- sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if req, err := fakeServer.XDSRequestChan.Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("Got unexpected xds request after watch is canceled: %v", req) - } - - // Still expected an callback update, because response was good. - if _, err := cbCDS.Receive(ctx); err != nil { - t.Fatalf("Timeout when expecting %v update", xdsclient.ClusterResource) - } - - // Start a new watch. The new watch should have the nonce from the response - // above, and version from the first good response. - v2c.AddWatch(xdsclient.ClusterResource, goodClusterName1) - if err := compareXDSRequest(ctx, fakeServer.XDSRequestChan, goodCDSRequest, strconv.Itoa(versionCDS-1), nonce, false); err != nil { - t.Fatalf("Failed to receive %v request: %v", xdsclient.ClusterResource, err) - } - - // Send a bad response with the next version. - if err := sendBadResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSRequest); err != nil { - t.Fatal(err) - } - versionCDS++ - - // send another good response, and check for ack, with the new version. - if _, err := sendGoodResp(ctx, t, xdsclient.ClusterResource, fakeServer, versionCDS, goodCDSResponse1, goodCDSRequest, cbCDS); err != nil { - t.Fatal(err) - } - versionCDS++ -} diff --git a/xds/internal/client/v2/cds_test.go b/xds/internal/client/v2/cds_test.go deleted file mode 100644 index c71b84532315..000000000000 --- a/xds/internal/client/v2/cds_test.go +++ /dev/null @@ -1,210 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package v2 - -import ( - "testing" - "time" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - "github.com/golang/protobuf/ptypes" - anypb "github.com/golang/protobuf/ptypes/any" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/version" -) - -const ( - serviceName1 = "foo-service" - serviceName2 = "bar-service" -) - -var ( - badlyMarshaledCDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: version.V2ClusterURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - TypeUrl: version.V2ClusterURL, - } - goodCluster1 = &xdspb.Cluster{ - Name: goodClusterName1, - ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, - EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ - EdsConfig: &corepb.ConfigSource{ - ConfigSourceSpecifier: &corepb.ConfigSource_Ads{ - Ads: &corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName1, - }, - LbPolicy: xdspb.Cluster_ROUND_ROBIN, - LrsServer: &corepb.ConfigSource{ - ConfigSourceSpecifier: &corepb.ConfigSource_Self{ - Self: &corepb.SelfConfigSource{}, - }, - }, - } - marshaledCluster1, _ = ptypes.MarshalAny(goodCluster1) - goodCluster2 = &xdspb.Cluster{ - Name: goodClusterName2, - ClusterDiscoveryType: &xdspb.Cluster_Type{Type: xdspb.Cluster_EDS}, - EdsClusterConfig: &xdspb.Cluster_EdsClusterConfig{ - EdsConfig: &corepb.ConfigSource{ - ConfigSourceSpecifier: &corepb.ConfigSource_Ads{ - 
Ads: &corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: serviceName2, - }, - LbPolicy: xdspb.Cluster_ROUND_ROBIN, - } - marshaledCluster2, _ = ptypes.MarshalAny(goodCluster2) - goodCDSResponse1 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledCluster1, - }, - TypeUrl: version.V2ClusterURL, - } - goodCDSResponse2 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledCluster2, - }, - TypeUrl: version.V2ClusterURL, - } -) - -// TestCDSHandleResponse starts a fake xDS server, makes a ClientConn to it, -// and creates a v2Client using it. Then, it registers a CDS watcher and tests -// different CDS responses. -func (s) TestCDSHandleResponse(t *testing.T) { - tests := []struct { - name string - cdsResponse *xdspb.DiscoveryResponse - wantErr bool - wantUpdate map[string]xdsclient.ClusterUpdate - wantUpdateMD xdsclient.UpdateMetadata - wantUpdateErr bool - }{ - // Badly marshaled CDS response. - { - name: "badly-marshaled-response", - cdsResponse: badlyMarshaledCDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, - }, - }, - wantUpdateErr: false, - }, - // Response does not contain Cluster proto. - { - name: "no-cluster-proto-in-response", - cdsResponse: badResourceTypeInLDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, - }, - }, - wantUpdateErr: false, - }, - // Response contains no clusters. - { - name: "no-cluster", - cdsResponse: &xdspb.DiscoveryResponse{}, - wantErr: false, - wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains one good cluster we are not interested in. 
- { - name: "one-uninteresting-cluster", - cdsResponse: goodCDSResponse2, - wantErr: false, - wantUpdate: map[string]xdsclient.ClusterUpdate{ - goodClusterName2: {ServiceName: serviceName2, Raw: marshaledCluster2}, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains one cluster and it is good. - { - name: "one-good-cluster", - cdsResponse: goodCDSResponse1, - wantErr: false, - wantUpdate: map[string]xdsclient.ClusterUpdate{ - goodClusterName1: {ServiceName: serviceName1, EnableLRS: true, Raw: marshaledCluster1}, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testWatchHandle(t, &watchHandleTestcase{ - rType: xdsclient.ClusterResource, - resourceName: goodClusterName1, - - responseToHandle: test.cdsResponse, - wantHandleErr: test.wantErr, - wantUpdate: test.wantUpdate, - wantUpdateMD: test.wantUpdateMD, - wantUpdateErr: test.wantUpdateErr, - }) - }) - } -} - -// TestCDSHandleResponseWithoutWatch tests the case where the v2Client receives -// a CDS response without a registered watcher. 
-func (s) TestCDSHandleResponseWithoutWatch(t *testing.T) { - _, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - - if v2c.handleCDSResponse(badResourceTypeInLDSResponse) == nil { - t.Fatal("v2c.handleCDSResponse() succeeded, should have failed") - } - - if v2c.handleCDSResponse(goodCDSResponse1) != nil { - t.Fatal("v2c.handleCDSResponse() succeeded, should have failed") - } -} diff --git a/xds/internal/client/v2/client.go b/xds/internal/client/v2/client.go deleted file mode 100644 index b6bc4908120d..000000000000 --- a/xds/internal/client/v2/client.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package v2 provides xDS v2 transport protocol specific functionality. 
-package v2 - -import ( - "context" - "fmt" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpclog" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/version" - - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v2adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" - statuspb "google.golang.org/genproto/googleapis/rpc/status" -) - -func init() { - xdsclient.RegisterAPIClientBuilder(clientBuilder{}) -} - -var ( - resourceTypeToURL = map[xdsclient.ResourceType]string{ - xdsclient.ListenerResource: version.V2ListenerURL, - xdsclient.RouteConfigResource: version.V2RouteConfigURL, - xdsclient.ClusterResource: version.V2ClusterURL, - xdsclient.EndpointsResource: version.V2EndpointsURL, - } -) - -type clientBuilder struct{} - -func (clientBuilder) Build(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIClient, error) { - return newClient(cc, opts) -} - -func (clientBuilder) Version() version.TransportAPI { - return version.TransportV2 -} - -func newClient(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIClient, error) { - nodeProto, ok := opts.NodeProto.(*v2corepb.Node) - if !ok { - return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, (*v2corepb.Node)(nil)) - } - v2c := &client{ - cc: cc, - parent: opts.Parent, - nodeProto: nodeProto, - logger: opts.Logger, - } - v2c.ctx, v2c.cancelCtx = context.WithCancel(context.Background()) - v2c.TransportHelper = xdsclient.NewTransportHelper(v2c, opts.Logger, opts.Backoff) - return v2c, nil -} - -type adsStream v2adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient - -// client performs the actual xDS RPCs using the xDS v2 API. 
It creates a -// single ADS stream on which the different types of xDS requests and responses -// are multiplexed. -type client struct { - *xdsclient.TransportHelper - - ctx context.Context - cancelCtx context.CancelFunc - parent xdsclient.UpdateHandler - logger *grpclog.PrefixLogger - - // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. - cc *grpc.ClientConn - nodeProto *v2corepb.Node -} - -func (v2c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { - return v2adsgrpc.NewAggregatedDiscoveryServiceClient(v2c.cc).StreamAggregatedResources(v2c.ctx, grpc.WaitForReady(true)) -} - -// sendRequest sends out a DiscoveryRequest for the given resourceNames, of type -// rType, on the provided stream. -// -// version is the ack version to be sent with the request -// - If this is the new request (not an ack/nack), version will be empty. -// - If this is an ack, version will be the version from the response. -// - If this is a nack, version will be the previous acked version (from -// versionMap). If there was no ack before, it will be empty. -func (v2c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsclient.ResourceType, version, nonce, errMsg string) error { - stream, ok := s.(adsStream) - if !ok { - return fmt.Errorf("xds: Attempt to send request on unsupported stream type: %T", s) - } - req := &v2xdspb.DiscoveryRequest{ - Node: v2c.nodeProto, - TypeUrl: resourceTypeToURL[rType], - ResourceNames: resourceNames, - VersionInfo: version, - ResponseNonce: nonce, - } - if errMsg != "" { - req.ErrorDetail = &statuspb.Status{ - Code: int32(codes.InvalidArgument), Message: errMsg, - } - } - if err := stream.Send(req); err != nil { - return fmt.Errorf("xds: stream.Send(%+v) failed: %v", req, err) - } - v2c.logger.Debugf("ADS request sent: %v", req) - return nil -} - -// RecvResponse blocks on the receipt of one response message on the provided -// stream. 
-func (v2c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { - stream, ok := s.(adsStream) - if !ok { - return nil, fmt.Errorf("xds: Attempt to receive response on unsupported stream type: %T", s) - } - - resp, err := stream.Recv() - if err != nil { - // TODO: call watch callbacks with error when stream is broken. - return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) - } - v2c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) - v2c.logger.Debugf("ADS response received: %v", resp) - return resp, nil -} - -func (v2c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, string, string, error) { - rType := xdsclient.UnknownResource - resp, ok := r.(*v2xdspb.DiscoveryResponse) - if !ok { - return rType, "", "", fmt.Errorf("xds: unsupported message type: %T", resp) - } - - // Note that the xDS transport protocol is versioned independently of - // the resource types, and it is supported to transfer older versions - // of resource types using new versions of the transport protocol, or - // vice-versa. Hence we need to handle v3 type_urls as well here. - var err error - url := resp.GetTypeUrl() - switch { - case xdsclient.IsListenerResource(url): - err = v2c.handleLDSResponse(resp) - rType = xdsclient.ListenerResource - case xdsclient.IsRouteConfigResource(url): - err = v2c.handleRDSResponse(resp) - rType = xdsclient.RouteConfigResource - case xdsclient.IsClusterResource(url): - err = v2c.handleCDSResponse(resp) - rType = xdsclient.ClusterResource - case xdsclient.IsEndpointsResource(url): - err = v2c.handleEDSResponse(resp) - rType = xdsclient.EndpointsResource - default: - return rType, "", "", xdsclient.ErrResourceTypeUnsupported{ - ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", resp.GetTypeUrl()), - } - } - return rType, resp.GetVersionInfo(), resp.GetNonce(), err -} - -// handleLDSResponse processes an LDS response received from the management -// server. 
On receipt of a good response, it also invokes the registered watcher -// callback. -func (v2c *client) handleLDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalListener(resp.GetVersionInfo(), resp.GetResources(), v2c.logger) - v2c.parent.NewListeners(update, md) - return err -} - -// handleRDSResponse processes an RDS response received from the management -// server. On receipt of a good response, it caches validated resources and also -// invokes the registered watcher callback. -func (v2c *client) handleRDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalRouteConfig(resp.GetVersionInfo(), resp.GetResources(), v2c.logger) - v2c.parent.NewRouteConfigs(update, md) - return err -} - -// handleCDSResponse processes an CDS response received from the management -// server. On receipt of a good response, it also invokes the registered watcher -// callback. -func (v2c *client) handleCDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalCluster(resp.GetVersionInfo(), resp.GetResources(), v2c.logger) - v2c.parent.NewClusters(update, md) - return err -} - -func (v2c *client) handleEDSResponse(resp *v2xdspb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalEndpoints(resp.GetVersionInfo(), resp.GetResources(), v2c.logger) - v2c.parent.NewEndpoints(update, md) - return err -} diff --git a/xds/internal/client/v2/client_test.go b/xds/internal/client/v2/client_test.go deleted file mode 100644 index e770324e1b12..000000000000 --- a/xds/internal/client/v2/client_test.go +++ /dev/null @@ -1,698 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package v2 - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/resolver/manual" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/version" - "google.golang.org/protobuf/testing/protocmp" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - basepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - routepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/route" - httppb "github.com/envoyproxy/go-control-plane/envoy/config/filter/network/http_connection_manager/v2" - listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v2" - anypb "github.com/golang/protobuf/ptypes/any" - structpb "github.com/golang/protobuf/ptypes/struct" -) - -type s struct { - grpctest.Tester -} - -func Test(t *testing.T) { - grpctest.RunSubTests(t, s{}) -} - -const ( - goodLDSTarget1 = "lds.target.good:1111" - goodLDSTarget2 = 
"lds.target.good:2222" - goodRouteName1 = "GoodRouteConfig1" - goodRouteName2 = "GoodRouteConfig2" - goodEDSName = "GoodClusterAssignment1" - uninterestingDomain = "uninteresting.domain" - goodClusterName1 = "GoodClusterName1" - goodClusterName2 = "GoodClusterName2" - uninterestingClusterName = "UninterestingClusterName" - httpConnManagerURL = "type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" -) - -var ( - goodNodeProto = &basepb.Node{ - Id: "ENVOY_NODE_ID", - Metadata: &structpb.Struct{ - Fields: map[string]*structpb.Value{ - "TRAFFICDIRECTOR_GRPC_HOSTNAME": { - Kind: &structpb.Value_StringValue{StringValue: "trafficdirector"}, - }, - }, - }, - } - goodLDSRequest = &xdspb.DiscoveryRequest{ - Node: goodNodeProto, - TypeUrl: version.V2ListenerURL, - ResourceNames: []string{goodLDSTarget1}, - } - goodRDSRequest = &xdspb.DiscoveryRequest{ - Node: goodNodeProto, - TypeUrl: version.V2RouteConfigURL, - ResourceNames: []string{goodRouteName1}, - } - goodCDSRequest = &xdspb.DiscoveryRequest{ - Node: goodNodeProto, - TypeUrl: version.V2ClusterURL, - ResourceNames: []string{goodClusterName1}, - } - goodEDSRequest = &xdspb.DiscoveryRequest{ - Node: goodNodeProto, - TypeUrl: version.V2EndpointsURL, - ResourceNames: []string{goodEDSName}, - } - goodHTTPConnManager1 = &httppb.HttpConnectionManager{ - RouteSpecifier: &httppb.HttpConnectionManager_Rds{ - Rds: &httppb.Rds{ - ConfigSource: &basepb.ConfigSource{ - ConfigSourceSpecifier: &basepb.ConfigSource_Ads{Ads: &basepb.AggregatedConfigSource{}}, - }, - RouteConfigName: goodRouteName1, - }, - }, - } - marshaledConnMgr1, _ = proto.Marshal(goodHTTPConnManager1) - goodListener1 = &xdspb.Listener{ - Name: goodLDSTarget1, - ApiListener: &listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: httpConnManagerURL, - Value: marshaledConnMgr1, - }, - }, - } - marshaledListener1, _ = ptypes.MarshalAny(goodListener1) - goodListener2 = &xdspb.Listener{ - Name: goodLDSTarget2, - 
ApiListener: &listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: httpConnManagerURL, - Value: marshaledConnMgr1, - }, - }, - } - marshaledListener2, _ = ptypes.MarshalAny(goodListener2) - noAPIListener = &xdspb.Listener{Name: goodLDSTarget1} - marshaledNoAPIListener, _ = proto.Marshal(noAPIListener) - badAPIListener2 = &xdspb.Listener{ - Name: goodLDSTarget2, - ApiListener: &listenerpb.ApiListener{ - ApiListener: &anypb.Any{ - TypeUrl: httpConnManagerURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - } - badlyMarshaledAPIListener2, _ = proto.Marshal(badAPIListener2) - goodLDSResponse1 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledListener1, - }, - TypeUrl: version.V2ListenerURL, - } - goodLDSResponse2 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledListener2, - }, - TypeUrl: version.V2ListenerURL, - } - emptyLDSResponse = &xdspb.DiscoveryResponse{TypeUrl: version.V2ListenerURL} - badlyMarshaledLDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: version.V2ListenerURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - TypeUrl: version.V2ListenerURL, - } - badResourceTypeInLDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: httpConnManagerURL, - Value: marshaledConnMgr1, - }, - }, - TypeUrl: version.V2ListenerURL, - } - ldsResponseWithMultipleResources = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledListener2, - marshaledListener1, - }, - TypeUrl: version.V2ListenerURL, - } - noAPIListenerLDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: version.V2ListenerURL, - Value: marshaledNoAPIListener, - }, - }, - TypeUrl: version.V2ListenerURL, - } - goodBadUglyLDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledListener2, - marshaledListener1, - { - TypeUrl: version.V2ListenerURL, - Value: badlyMarshaledAPIListener2, - }, - }, - TypeUrl: version.V2ListenerURL, - } - badlyMarshaledRDSResponse = 
&xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: version.V2RouteConfigURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - TypeUrl: version.V2RouteConfigURL, - } - badResourceTypeInRDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: httpConnManagerURL, - Value: marshaledConnMgr1, - }, - }, - TypeUrl: version.V2RouteConfigURL, - } - noVirtualHostsRouteConfig = &xdspb.RouteConfiguration{ - Name: goodRouteName1, - } - marshaledNoVirtualHostsRouteConfig, _ = ptypes.MarshalAny(noVirtualHostsRouteConfig) - noVirtualHostsInRDSResponse = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledNoVirtualHostsRouteConfig, - }, - TypeUrl: version.V2RouteConfigURL, - } - goodRouteConfig1 = &xdspb.RouteConfiguration{ - Name: goodRouteName1, - VirtualHosts: []*routepb.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*routepb.Route{ - { - Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &routepb.Route_Route{ - Route: &routepb.RouteAction{ - ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, - }, - }, - }, - }, - }, - { - Domains: []string{goodLDSTarget1}, - Routes: []*routepb.Route{ - { - Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &routepb.Route_Route{ - Route: &routepb.RouteAction{ - ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: goodClusterName1}, - }, - }, - }, - }, - }, - }, - } - marshaledGoodRouteConfig1, _ = ptypes.MarshalAny(goodRouteConfig1) - goodRouteConfig2 = &xdspb.RouteConfiguration{ - Name: goodRouteName2, - VirtualHosts: []*routepb.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*routepb.Route{ - { - Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &routepb.Route_Route{ - Route: &routepb.RouteAction{ - ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, - }, - }, - 
}, - }, - }, - { - Domains: []string{goodLDSTarget1}, - Routes: []*routepb.Route{ - { - Match: &routepb.RouteMatch{PathSpecifier: &routepb.RouteMatch_Prefix{Prefix: ""}}, - Action: &routepb.Route_Route{ - Route: &routepb.RouteAction{ - ClusterSpecifier: &routepb.RouteAction_Cluster{Cluster: goodClusterName2}, - }, - }, - }, - }, - }, - }, - } - marshaledGoodRouteConfig2, _ = ptypes.MarshalAny(goodRouteConfig2) - goodRDSResponse1 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledGoodRouteConfig1, - }, - TypeUrl: version.V2RouteConfigURL, - } - goodRDSResponse2 = &xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledGoodRouteConfig2, - }, - TypeUrl: version.V2RouteConfigURL, - } - // An place holder error. When comparing UpdateErrorMetadata, we only check - // if error is nil, and don't compare error content. - errPlaceHolder = fmt.Errorf("err place holder") -) - -type watchHandleTestcase struct { - rType xdsclient.ResourceType - resourceName string - - responseToHandle *xdspb.DiscoveryResponse - wantHandleErr bool - wantUpdate interface{} - wantUpdateMD xdsclient.UpdateMetadata - wantUpdateErr bool -} - -type testUpdateReceiver struct { - f func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) -} - -func (t *testUpdateReceiver) NewListeners(d map[string]xdsclient.ListenerUpdate, metadata xdsclient.UpdateMetadata) { - dd := make(map[string]interface{}) - for k, v := range d { - dd[k] = v - } - t.newUpdate(xdsclient.ListenerResource, dd, metadata) -} - -func (t *testUpdateReceiver) NewRouteConfigs(d map[string]xdsclient.RouteConfigUpdate, metadata xdsclient.UpdateMetadata) { - dd := make(map[string]interface{}) - for k, v := range d { - dd[k] = v - } - t.newUpdate(xdsclient.RouteConfigResource, dd, metadata) -} - -func (t *testUpdateReceiver) NewClusters(d map[string]xdsclient.ClusterUpdate, metadata xdsclient.UpdateMetadata) { - dd := make(map[string]interface{}) - for k, v := range d { - dd[k] = v - } - 
t.newUpdate(xdsclient.ClusterResource, dd, metadata) -} - -func (t *testUpdateReceiver) NewEndpoints(d map[string]xdsclient.EndpointsUpdate, metadata xdsclient.UpdateMetadata) { - dd := make(map[string]interface{}) - for k, v := range d { - dd[k] = v - } - t.newUpdate(xdsclient.EndpointsResource, dd, metadata) -} - -func (t *testUpdateReceiver) newUpdate(rType xdsclient.ResourceType, d map[string]interface{}, metadata xdsclient.UpdateMetadata) { - t.f(rType, d, metadata) -} - -// testWatchHandle is called to test response handling for each xDS. -// -// It starts the xDS watch as configured in test, waits for the fake xds server -// to receive the request (so watch callback is installed), and calls -// handleXDSResp with responseToHandle (if it's set). It then compares the -// update received by watch callback with the expected results. -func testWatchHandle(t *testing.T, test *watchHandleTestcase) { - t.Helper() - - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - type updateErr struct { - u interface{} - md xdsclient.UpdateMetadata - err error - } - gotUpdateCh := testutils.NewChannel() - - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { - if rType == test.rType { - switch test.rType { - case xdsclient.ListenerResource: - dd := make(map[string]xdsclient.ListenerUpdate) - for n, u := range d { - dd[n] = u.(xdsclient.ListenerUpdate) - } - gotUpdateCh.Send(updateErr{dd, md, nil}) - case xdsclient.RouteConfigResource: - dd := make(map[string]xdsclient.RouteConfigUpdate) - for n, u := range d { - dd[n] = u.(xdsclient.RouteConfigUpdate) - } - gotUpdateCh.Send(updateErr{dd, md, nil}) - case xdsclient.ClusterResource: - dd := make(map[string]xdsclient.ClusterUpdate) - for n, u := range d { - dd[n] = u.(xdsclient.ClusterUpdate) - } - gotUpdateCh.Send(updateErr{dd, md, nil}) - case xdsclient.EndpointsResource: - dd := 
make(map[string]xdsclient.EndpointsUpdate) - for n, u := range d { - dd[n] = u.(xdsclient.EndpointsUpdate) - } - gotUpdateCh.Send(updateErr{dd, md, nil}) - } - } - }, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - - // Register the watcher, this will also trigger the v2Client to send the xDS - // request. - v2c.AddWatch(test.rType, test.resourceName) - - // Wait till the request makes it to the fakeServer. This ensures that - // the watch request has been processed by the v2Client. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout waiting for an xDS request: %v", err) - } - - // Directly push the response through a call to handleXDSResp. This bypasses - // the fakeServer, so it's only testing the handle logic. Client response - // processing is covered elsewhere. - // - // Also note that this won't trigger ACK, so there's no need to clear the - // request channel afterwards. 
- var handleXDSResp func(response *xdspb.DiscoveryResponse) error - switch test.rType { - case xdsclient.ListenerResource: - handleXDSResp = v2c.handleLDSResponse - case xdsclient.RouteConfigResource: - handleXDSResp = v2c.handleRDSResponse - case xdsclient.ClusterResource: - handleXDSResp = v2c.handleCDSResponse - case xdsclient.EndpointsResource: - handleXDSResp = v2c.handleEDSResponse - } - if err := handleXDSResp(test.responseToHandle); (err != nil) != test.wantHandleErr { - t.Fatalf("v2c.handleRDSResponse() returned err: %v, wantErr: %v", err, test.wantHandleErr) - } - - wantUpdate := test.wantUpdate - cmpOpts := cmp.Options{ - cmpopts.EquateEmpty(), protocmp.Transform(), - cmpopts.IgnoreFields(xdsclient.UpdateMetadata{}, "Timestamp"), - cmpopts.IgnoreFields(xdsclient.UpdateErrorMetadata{}, "Timestamp"), - cmp.Comparer(func(x, y error) bool { return (x == nil) == (y == nil) }), - } - uErr, err := gotUpdateCh.Receive(ctx) - if err == context.DeadlineExceeded { - t.Fatal("Timeout expecting xDS update") - } - gotUpdate := uErr.(updateErr).u - if diff := cmp.Diff(gotUpdate, wantUpdate, cmpOpts); diff != "" { - t.Fatalf("got update : %+v, want %+v, diff: %s", gotUpdate, wantUpdate, diff) - } - gotUpdateMD := uErr.(updateErr).md - if diff := cmp.Diff(gotUpdateMD, test.wantUpdateMD, cmpOpts); diff != "" { - t.Fatalf("got update : %+v, want %+v, diff: %s", gotUpdateMD, test.wantUpdateMD, diff) - } - gotUpdateErr := uErr.(updateErr).err - if (gotUpdateErr != nil) != test.wantUpdateErr { - t.Fatalf("got xDS update error {%v}, wantErr: %v", gotUpdateErr, test.wantUpdateErr) - } -} - -// startServerAndGetCC starts a fake XDS server and also returns a ClientConn -// connected to it. 
-func startServerAndGetCC(t *testing.T) (*fakeserver.Server, *grpc.ClientConn, func()) { - t.Helper() - - fs, sCleanup, err := fakeserver.StartServer() - if err != nil { - t.Fatalf("Failed to start fake xDS server: %v", err) - } - - cc, ccCleanup, err := fs.XDSClientConn() - if err != nil { - sCleanup() - t.Fatalf("Failed to get a clientConn to the fake xDS server: %v", err) - } - return fs, cc, func() { - sCleanup() - ccCleanup() - } -} - -func newV2Client(p xdsclient.UpdateHandler, cc *grpc.ClientConn, n *basepb.Node, b func(int) time.Duration, l *grpclog.PrefixLogger) (*client, error) { - c, err := newClient(cc, xdsclient.BuildOptions{ - Parent: p, - NodeProto: n, - Backoff: b, - Logger: l, - }) - if err != nil { - return nil, err - } - return c.(*client), nil -} - -// TestV2ClientBackoffAfterRecvError verifies if the v2Client backs off when it -// encounters a Recv error while receiving an LDS response. -func (s) TestV2ClientBackoffAfterRecvError(t *testing.T) { - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - // Override the v2Client backoff function with this, so that we can verify - // that a backoff actually was triggered. 
- boCh := make(chan int, 1) - clientBackoff := func(v int) time.Duration { - boCh <- v - return 0 - } - - callbackCh := make(chan struct{}) - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) { close(callbackCh) }, - }, cc, goodNodeProto, clientBackoff, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - t.Log("Started xds v2Client...") - - v2c.AddWatch(xdsclient.ListenerResource, goodLDSTarget1) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request...") - - fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")} - t.Log("Bad LDS response pushed to fakeServer...") - - timer := time.NewTimer(defaultTestShortTimeout) - select { - case <-timer.C: - t.Fatal("Timeout when expecting LDS update") - case <-boCh: - timer.Stop() - t.Log("v2Client backed off before retrying...") - case <-callbackCh: - t.Fatal("Received unexpected LDS callback") - } - - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request after backoff...") -} - -// TestV2ClientRetriesAfterBrokenStream verifies the case where a stream -// encountered a Recv() error, and is expected to send out xDS requests for -// registered watchers once it comes back up again. 
-func (s) TestV2ClientRetriesAfterBrokenStream(t *testing.T) { - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - callbackCh := testutils.NewChannel() - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { - if rType == xdsclient.ListenerResource { - if u, ok := d[goodLDSTarget1]; ok { - t.Logf("Received LDS callback with ldsUpdate {%+v}", u) - callbackCh.Send(struct{}{}) - } - } - }, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - t.Log("Started xds v2Client...") - - v2c.AddWatch(xdsclient.ListenerResource, goodLDSTarget1) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request...") - - fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} - t.Log("Good LDS response pushed to fakeServer...") - - if _, err := callbackCh.Receive(ctx); err != nil { - t.Fatal("Timeout when expecting LDS update") - } - - // Read the ack, so the next request is sent after stream re-creation. 
- if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS ACK") - } - - fakeServer.XDSResponseChan <- &fakeserver.Response{Err: errors.New("RPC error")} - t.Log("Bad LDS response pushed to fakeServer...") - - val, err := fakeServer.XDSRequestChan.Receive(ctx) - if err != nil { - t.Fatalf("Timeout expired when expecting LDS update") - } - gotRequest := val.(*fakeserver.Request) - if !proto.Equal(gotRequest.Req, goodLDSRequest) { - t.Fatalf("gotRequest: %+v, wantRequest: %+v", gotRequest.Req, goodLDSRequest) - } -} - -// TestV2ClientWatchWithoutStream verifies the case where a watch is started -// when the xds stream is not created. The watcher should not receive any update -// (because there won't be any xds response, and timeout is done at a upper -// level). And when the stream is re-created, the watcher should get future -// updates. -func (s) TestV2ClientWatchWithoutStream(t *testing.T) { - fakeServer, sCleanup, err := fakeserver.StartServer() - if err != nil { - t.Fatalf("Failed to start fake xDS server: %v", err) - } - defer sCleanup() - - const scheme = "xds_client_test_whatever" - rb := manual.NewBuilderWithScheme(scheme) - rb.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: "no.such.server"}}}) - - cc, err := grpc.Dial(scheme+":///whatever", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithResolvers(rb)) - if err != nil { - t.Fatalf("Failed to dial ClientConn: %v", err) - } - defer cc.Close() - - callbackCh := testutils.NewChannel() - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(rType xdsclient.ResourceType, d map[string]interface{}, md xdsclient.UpdateMetadata) { - if rType == xdsclient.ListenerResource { - if u, ok := d[goodLDSTarget1]; ok { - t.Logf("Received LDS callback with ldsUpdate {%+v}", u) - callbackCh.Send(u) - } - } - }, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() 
- t.Log("Started xds v2Client...") - - // This watch is started when the xds-ClientConn is in Transient Failure, - // and no xds stream is created. - v2c.AddWatch(xdsclient.ListenerResource, goodLDSTarget1) - - // The watcher should receive an update, with a timeout error in it. - sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) - defer sCancel() - if v, err := callbackCh.Receive(sCtx); err == nil { - t.Fatalf("Expect an timeout error from watcher, got %v", v) - } - - // Send the real server address to the ClientConn, the stream should be - // created, and the previous watch should be sent. - rb.UpdateState(resolver.State{ - Addresses: []resolver.Address{{Addr: fakeServer.Address}}, - }) - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout expired when expecting an LDS request") - } - t.Log("FakeServer received request...") - - fakeServer.XDSResponseChan <- &fakeserver.Response{Resp: goodLDSResponse1} - t.Log("Good LDS response pushed to fakeServer...") - - if v, err := callbackCh.Receive(ctx); err != nil { - t.Fatal("Timeout when expecting LDS update") - } else if _, ok := v.(xdsclient.ListenerUpdate); !ok { - t.Fatalf("Expect an LDS update from watcher, got %v", v) - } -} - -func newStringP(s string) *string { - return &s -} diff --git a/xds/internal/client/v2/eds_test.go b/xds/internal/client/v2/eds_test.go deleted file mode 100644 index 0990e7ebae0e..000000000000 --- a/xds/internal/client/v2/eds_test.go +++ /dev/null @@ -1,206 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package v2 - -import ( - "testing" - "time" - - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - "github.com/golang/protobuf/ptypes" - anypb "github.com/golang/protobuf/ptypes/any" - "google.golang.org/grpc/xds/internal" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/version" -) - -var ( - badlyMarshaledEDSResponse = &v2xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: version.V2EndpointsURL, - Value: []byte{1, 2, 3, 4}, - }, - }, - TypeUrl: version.V2EndpointsURL, - } - badResourceTypeInEDSResponse = &v2xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - { - TypeUrl: httpConnManagerURL, - Value: marshaledConnMgr1, - }, - }, - TypeUrl: version.V2EndpointsURL, - } - marshaledGoodCLA1 = func() *anypb.Any { - clab0 := testutils.NewClusterLoadAssignmentBuilder(goodEDSName, nil) - clab0.AddLocality("locality-1", 1, 1, []string{"addr1:314"}, nil) - clab0.AddLocality("locality-2", 1, 0, []string{"addr2:159"}, nil) - a, _ := ptypes.MarshalAny(clab0.Build()) - return a - }() - goodEDSResponse1 = &v2xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledGoodCLA1, - }, - TypeUrl: version.V2EndpointsURL, - } - marshaledGoodCLA2 = func() *anypb.Any { - clab0 := testutils.NewClusterLoadAssignmentBuilder("not-goodEDSName", nil) - clab0.AddLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) - a, _ := ptypes.MarshalAny(clab0.Build()) - return a - }() - goodEDSResponse2 
= &v2xdspb.DiscoveryResponse{ - Resources: []*anypb.Any{ - marshaledGoodCLA2, - }, - TypeUrl: version.V2EndpointsURL, - } -) - -func (s) TestEDSHandleResponse(t *testing.T) { - tests := []struct { - name string - edsResponse *v2xdspb.DiscoveryResponse - wantErr bool - wantUpdate map[string]xdsclient.EndpointsUpdate - wantUpdateMD xdsclient.UpdateMetadata - wantUpdateErr bool - }{ - // Any in resource is badly marshaled. - { - name: "badly-marshaled_response", - edsResponse: badlyMarshaledEDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, - }, - }, - wantUpdateErr: false, - }, - // Response doesn't contain resource with the right type. - { - name: "no-config-in-response", - edsResponse: badResourceTypeInEDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, - }, - }, - wantUpdateErr: false, - }, - // Response contains one uninteresting ClusterLoadAssignment. - { - name: "one-uninterestring-assignment", - edsResponse: goodEDSResponse2, - wantErr: false, - wantUpdate: map[string]xdsclient.EndpointsUpdate{ - "not-goodEDSName": { - Localities: []xdsclient.Locality{ - { - Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 0, - Weight: 1, - }, - }, - Raw: marshaledGoodCLA2, - }, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains one good ClusterLoadAssignment. 
- { - name: "one-good-assignment", - edsResponse: goodEDSResponse1, - wantErr: false, - wantUpdate: map[string]xdsclient.EndpointsUpdate{ - goodEDSName: { - Localities: []xdsclient.Locality{ - { - Endpoints: []xdsclient.Endpoint{{Address: "addr1:314"}}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []xdsclient.Endpoint{{Address: "addr2:159"}}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, - }, - Raw: marshaledGoodCLA1, - }, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testWatchHandle(t, &watchHandleTestcase{ - rType: xdsclient.EndpointsResource, - resourceName: goodEDSName, - responseToHandle: test.edsResponse, - wantHandleErr: test.wantErr, - wantUpdate: test.wantUpdate, - wantUpdateMD: test.wantUpdateMD, - wantUpdateErr: test.wantUpdateErr, - }) - }) - } -} - -// TestEDSHandleResponseWithoutWatch tests the case where the v2Client -// receives an EDS response without a registered EDS watcher. 
-func (s) TestEDSHandleResponseWithoutWatch(t *testing.T) { - _, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - - if v2c.handleEDSResponse(badResourceTypeInEDSResponse) == nil { - t.Fatal("v2c.handleEDSResponse() succeeded, should have failed") - } - - if v2c.handleEDSResponse(goodEDSResponse1) != nil { - t.Fatal("v2c.handleEDSResponse() succeeded, should have failed") - } -} diff --git a/xds/internal/client/v2/lds_test.go b/xds/internal/client/v2/lds_test.go deleted file mode 100644 index 1f4c980fae5e..000000000000 --- a/xds/internal/client/v2/lds_test.go +++ /dev/null @@ -1,198 +0,0 @@ -/* - * - * Copyright 2019 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package v2 - -import ( - "testing" - "time" - - v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - - xdsclient "google.golang.org/grpc/xds/internal/client" -) - -// TestLDSHandleResponse starts a fake xDS server, makes a ClientConn to it, -// and creates a client using it. Then, it registers a watchLDS and tests -// different LDS responses. 
-func (s) TestLDSHandleResponse(t *testing.T) { - tests := []struct { - name string - ldsResponse *v2xdspb.DiscoveryResponse - wantErr bool - wantUpdate map[string]xdsclient.ListenerUpdate - wantUpdateMD xdsclient.UpdateMetadata - wantUpdateErr bool - }{ - // Badly marshaled LDS response. - { - name: "badly-marshaled-response", - ldsResponse: badlyMarshaledLDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, - }, - }, - wantUpdateErr: false, - }, - // Response does not contain Listener proto. - { - name: "no-listener-proto-in-response", - ldsResponse: badResourceTypeInLDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, - }, - }, - wantUpdateErr: false, - }, - // No APIListener in the response. Just one test case here for a bad - // ApiListener, since the others are covered in - // TestGetRouteConfigNameFromListener. - { - name: "no-apiListener-in-response", - ldsResponse: noAPIListenerLDSResponse, - wantErr: true, - wantUpdate: map[string]xdsclient.ListenerUpdate{ - goodLDSTarget1: {}, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, - }, - }, - wantUpdateErr: false, - }, - // Response contains one listener and it is good. - { - name: "one-good-listener", - ldsResponse: goodLDSResponse1, - wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdate{ - goodLDSTarget1: {RouteConfigName: goodRouteName1, Raw: marshaledListener1}, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains multiple good listeners, including the one we are - // interested in. 
- { - name: "multiple-good-listener", - ldsResponse: ldsResponseWithMultipleResources, - wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdate{ - goodLDSTarget1: {RouteConfigName: goodRouteName1, Raw: marshaledListener1}, - goodLDSTarget2: {RouteConfigName: goodRouteName1, Raw: marshaledListener2}, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains two good listeners (one interesting and one - // uninteresting), and one badly marshaled listener. This will cause a - // nack because the uninteresting listener will still be parsed. - { - name: "good-bad-ugly-listeners", - ldsResponse: goodBadUglyLDSResponse, - wantErr: true, - wantUpdate: map[string]xdsclient.ListenerUpdate{ - goodLDSTarget1: {RouteConfigName: goodRouteName1, Raw: marshaledListener1}, - goodLDSTarget2: {}, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, - }, - }, - wantUpdateErr: false, - }, - // Response contains one listener, but we are not interested in it. - { - name: "one-uninteresting-listener", - ldsResponse: goodLDSResponse2, - wantErr: false, - wantUpdate: map[string]xdsclient.ListenerUpdate{ - goodLDSTarget2: {RouteConfigName: goodRouteName1, Raw: marshaledListener2}, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response constains no resources. This is the case where the server - // does not know about the target we are interested in. 
- { - name: "empty-response", - ldsResponse: emptyLDSResponse, - wantErr: false, - wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testWatchHandle(t, &watchHandleTestcase{ - rType: xdsclient.ListenerResource, - resourceName: goodLDSTarget1, - responseToHandle: test.ldsResponse, - wantHandleErr: test.wantErr, - wantUpdate: test.wantUpdate, - wantUpdateMD: test.wantUpdateMD, - wantUpdateErr: test.wantUpdateErr, - }) - }) - } -} - -// TestLDSHandleResponseWithoutWatch tests the case where the client receives -// an LDS response without a registered watcher. -func (s) TestLDSHandleResponseWithoutWatch(t *testing.T) { - _, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - - if v2c.handleLDSResponse(badResourceTypeInLDSResponse) == nil { - t.Fatal("v2c.handleLDSResponse() succeeded, should have failed") - } - - if v2c.handleLDSResponse(goodLDSResponse1) != nil { - t.Fatal("v2c.handleLDSResponse() succeeded, should have failed") - } -} diff --git a/xds/internal/client/v2/loadreport.go b/xds/internal/client/v2/loadreport.go deleted file mode 100644 index 69405fcd9ad3..000000000000 --- a/xds/internal/client/v2/loadreport.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package v2 - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - "google.golang.org/grpc/xds/internal/client/load" - - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v2endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" - lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" - lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" - "google.golang.org/grpc" - "google.golang.org/grpc/xds/internal" -) - -const clientFeatureLRSSendAllClusters = "envoy.lrs.supports_send_all_clusters" - -type lrsStream lrsgrpc.LoadReportingService_StreamLoadStatsClient - -func (v2c *client) NewLoadStatsStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) { - c := lrsgrpc.NewLoadReportingServiceClient(cc) - return c.StreamLoadStats(ctx) -} - -func (v2c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { - stream, ok := s.(lrsStream) - if !ok { - return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) - } - node := proto.Clone(v2c.nodeProto).(*v2corepb.Node) - if node == nil { - node = &v2corepb.Node{} - } - node.ClientFeatures = append(node.ClientFeatures, clientFeatureLRSSendAllClusters) - - req := &lrspb.LoadStatsRequest{Node: node} - v2c.logger.Infof("lrs: sending init LoadStatsRequest: %v", req) - return stream.Send(req) -} - -func (v2c *client) 
HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time.Duration, error) { - stream, ok := s.(lrsStream) - if !ok { - return nil, 0, fmt.Errorf("lrs: Attempt to receive response on unsupported stream type: %T", s) - } - - resp, err := stream.Recv() - if err != nil { - return nil, 0, fmt.Errorf("lrs: failed to receive first response: %v", err) - } - v2c.logger.Infof("lrs: received first LoadStatsResponse: %+v", resp) - - interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) - if err != nil { - return nil, 0, fmt.Errorf("lrs: failed to convert report interval: %v", err) - } - - if resp.ReportEndpointGranularity { - // TODO: fixme to support per endpoint loads. - return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation") - } - - clusters := resp.Clusters - if resp.SendAllClusters { - // Return nil to send stats for all clusters. - clusters = nil - } - - return clusters, interval, nil -} - -func (v2c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) error { - stream, ok := s.(lrsStream) - if !ok { - return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) - } - - var clusterStats []*v2endpointpb.ClusterStats - for _, sd := range loads { - var ( - droppedReqs []*v2endpointpb.ClusterStats_DroppedRequests - localityStats []*v2endpointpb.UpstreamLocalityStats - ) - for category, count := range sd.Drops { - droppedReqs = append(droppedReqs, &v2endpointpb.ClusterStats_DroppedRequests{ - Category: category, - DroppedCount: count, - }) - } - for l, localityData := range sd.LocalityStats { - lid, err := internal.LocalityIDFromString(l) - if err != nil { - return err - } - var loadMetricStats []*v2endpointpb.EndpointLoadMetricStats - for name, loadData := range localityData.LoadStats { - loadMetricStats = append(loadMetricStats, &v2endpointpb.EndpointLoadMetricStats{ - MetricName: name, - NumRequestsFinishedWithMetric: loadData.Count, - TotalMetricValue: loadData.Sum, - 
}) - } - localityStats = append(localityStats, &v2endpointpb.UpstreamLocalityStats{ - Locality: &v2corepb.Locality{ - Region: lid.Region, - Zone: lid.Zone, - SubZone: lid.SubZone, - }, - TotalSuccessfulRequests: localityData.RequestStats.Succeeded, - TotalRequestsInProgress: localityData.RequestStats.InProgress, - TotalErrorRequests: localityData.RequestStats.Errored, - LoadMetricStats: loadMetricStats, - UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. - }) - } - - clusterStats = append(clusterStats, &v2endpointpb.ClusterStats{ - ClusterName: sd.Cluster, - ClusterServiceName: sd.Service, - UpstreamLocalityStats: localityStats, - TotalDroppedRequests: sd.TotalDrops, - DroppedRequests: droppedReqs, - LoadReportInterval: ptypes.DurationProto(sd.ReportInterval), - }) - - } - - req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} - v2c.logger.Infof("lrs: sending LRS loads: %+v", req) - return stream.Send(req) -} diff --git a/xds/internal/client/v2/rds_test.go b/xds/internal/client/v2/rds_test.go deleted file mode 100644 index dd145158b8a9..000000000000 --- a/xds/internal/client/v2/rds_test.go +++ /dev/null @@ -1,193 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package v2 - -import ( - "context" - "testing" - "time" - - xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" - - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" -) - -// doLDS makes a LDS watch, and waits for the response and ack to finish. -// -// This is called by RDS tests to start LDS first, because LDS is a -// pre-requirement for RDS, and RDS handle would fail without an existing LDS -// watch. -func doLDS(ctx context.Context, t *testing.T, v2c xdsclient.APIClient, fakeServer *fakeserver.Server) { - v2c.AddWatch(xdsclient.ListenerResource, goodLDSTarget1) - if _, err := fakeServer.XDSRequestChan.Receive(ctx); err != nil { - t.Fatalf("Timeout waiting for LDS request: %v", err) - } -} - -// TestRDSHandleResponseWithRouting starts a fake xDS server, makes a ClientConn -// to it, and creates a v2Client using it. Then, it registers an LDS and RDS -// watcher and tests different RDS responses. -func (s) TestRDSHandleResponseWithRouting(t *testing.T) { - tests := []struct { - name string - rdsResponse *xdspb.DiscoveryResponse - wantErr bool - wantUpdate map[string]xdsclient.RouteConfigUpdate - wantUpdateMD xdsclient.UpdateMetadata - wantUpdateErr bool - }{ - // Badly marshaled RDS response. - { - name: "badly-marshaled-response", - rdsResponse: badlyMarshaledRDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, - }, - }, - wantUpdateErr: false, - }, - // Response does not contain RouteConfiguration proto. 
- { - name: "no-route-config-in-response", - rdsResponse: badResourceTypeInRDSResponse, - wantErr: true, - wantUpdate: nil, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusNACKed, - ErrState: &xdsclient.UpdateErrorMetadata{ - Err: errPlaceHolder, - }, - }, - wantUpdateErr: false, - }, - // No VirtualHosts in the response. Just one test case here for a bad - // RouteConfiguration, since the others are covered in - // TestGetClusterFromRouteConfiguration. - { - name: "no-virtual-hosts-in-response", - rdsResponse: noVirtualHostsInRDSResponse, - wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdate{ - goodRouteName1: { - VirtualHosts: nil, - Raw: marshaledNoVirtualHostsRouteConfig, - }, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains one good RouteConfiguration, uninteresting though. - { - name: "one-uninteresting-route-config", - rdsResponse: goodRDSResponse2, - wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdate{ - goodRouteName2: { - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, - }, - { - Domains: []string{goodLDSTarget1}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{goodClusterName2: {Weight: 1}}}}, - }, - }, - Raw: marshaledGoodRouteConfig2, - }, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - // Response contains one good interesting RouteConfiguration. 
- { - name: "one-good-route-config", - rdsResponse: goodRDSResponse1, - wantErr: false, - wantUpdate: map[string]xdsclient.RouteConfigUpdate{ - goodRouteName1: { - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{uninterestingDomain}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{uninterestingClusterName: {Weight: 1}}}}, - }, - { - Domains: []string{goodLDSTarget1}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{goodClusterName1: {Weight: 1}}}}, - }, - }, - Raw: marshaledGoodRouteConfig1, - }, - }, - wantUpdateMD: xdsclient.UpdateMetadata{ - Status: xdsclient.ServiceStatusACKed, - }, - wantUpdateErr: false, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testWatchHandle(t, &watchHandleTestcase{ - rType: xdsclient.RouteConfigResource, - resourceName: goodRouteName1, - responseToHandle: test.rdsResponse, - wantHandleErr: test.wantErr, - wantUpdate: test.wantUpdate, - wantUpdateMD: test.wantUpdateMD, - wantUpdateErr: test.wantUpdateErr, - }) - }) - } -} - -// TestRDSHandleResponseWithoutRDSWatch tests the case where the v2Client -// receives an RDS response without a registered RDS watcher. 
-func (s) TestRDSHandleResponseWithoutRDSWatch(t *testing.T) { - fakeServer, cc, cleanup := startServerAndGetCC(t) - defer cleanup() - - v2c, err := newV2Client(&testUpdateReceiver{ - f: func(xdsclient.ResourceType, map[string]interface{}, xdsclient.UpdateMetadata) {}, - }, cc, goodNodeProto, func(int) time.Duration { return 0 }, nil) - if err != nil { - t.Fatal(err) - } - defer v2c.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - doLDS(ctx, t, v2c, fakeServer) - - if v2c.handleRDSResponse(badResourceTypeInRDSResponse) == nil { - t.Fatal("v2c.handleRDSResponse() succeeded, should have failed") - } - - if v2c.handleRDSResponse(goodRDSResponse1) != nil { - t.Fatal("v2c.handleRDSResponse() succeeded, should have failed") - } -} diff --git a/xds/internal/client/v3/client.go b/xds/internal/client/v3/client.go deleted file mode 100644 index 55cae56d8cc6..000000000000 --- a/xds/internal/client/v3/client.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package v3 provides xDS v3 transport protocol specific functionality. 
-package v3 - -import ( - "context" - "fmt" - - "github.com/golang/protobuf/proto" - statuspb "google.golang.org/genproto/googleapis/rpc/status" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal/grpclog" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/version" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" -) - -func init() { - xdsclient.RegisterAPIClientBuilder(clientBuilder{}) -} - -var ( - resourceTypeToURL = map[xdsclient.ResourceType]string{ - xdsclient.ListenerResource: version.V3ListenerURL, - xdsclient.RouteConfigResource: version.V3RouteConfigURL, - xdsclient.ClusterResource: version.V3ClusterURL, - xdsclient.EndpointsResource: version.V3EndpointsURL, - } -) - -type clientBuilder struct{} - -func (clientBuilder) Build(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIClient, error) { - return newClient(cc, opts) -} - -func (clientBuilder) Version() version.TransportAPI { - return version.TransportV3 -} - -func newClient(cc *grpc.ClientConn, opts xdsclient.BuildOptions) (xdsclient.APIClient, error) { - nodeProto, ok := opts.NodeProto.(*v3corepb.Node) - if !ok { - return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, v3corepb.Node{}) - } - v3c := &client{ - cc: cc, - parent: opts.Parent, - nodeProto: nodeProto, - logger: opts.Logger, - } - v3c.ctx, v3c.cancelCtx = context.WithCancel(context.Background()) - v3c.TransportHelper = xdsclient.NewTransportHelper(v3c, opts.Logger, opts.Backoff) - return v3c, nil -} - -type adsStream v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient - -// client performs the actual xDS RPCs using the xDS v3 API. 
It creates a -// single ADS stream on which the different types of xDS requests and responses -// are multiplexed. -type client struct { - *xdsclient.TransportHelper - - ctx context.Context - cancelCtx context.CancelFunc - parent xdsclient.UpdateHandler - logger *grpclog.PrefixLogger - - // ClientConn to the xDS gRPC server. Owned by the parent xdsClient. - cc *grpc.ClientConn - nodeProto *v3corepb.Node -} - -func (v3c *client) NewStream(ctx context.Context) (grpc.ClientStream, error) { - return v3adsgrpc.NewAggregatedDiscoveryServiceClient(v3c.cc).StreamAggregatedResources(v3c.ctx, grpc.WaitForReady(true)) -} - -// sendRequest sends out a DiscoveryRequest for the given resourceNames, of type -// rType, on the provided stream. -// -// version is the ack version to be sent with the request -// - If this is the new request (not an ack/nack), version will be empty. -// - If this is an ack, version will be the version from the response. -// - If this is a nack, version will be the previous acked version (from -// versionMap). If there was no ack before, it will be empty. -func (v3c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsclient.ResourceType, version, nonce, errMsg string) error { - stream, ok := s.(adsStream) - if !ok { - return fmt.Errorf("xds: Attempt to send request on unsupported stream type: %T", s) - } - req := &v3discoverypb.DiscoveryRequest{ - Node: v3c.nodeProto, - TypeUrl: resourceTypeToURL[rType], - ResourceNames: resourceNames, - VersionInfo: version, - ResponseNonce: nonce, - } - if errMsg != "" { - req.ErrorDetail = &statuspb.Status{ - Code: int32(codes.InvalidArgument), Message: errMsg, - } - } - if err := stream.Send(req); err != nil { - return fmt.Errorf("xds: stream.Send(%+v) failed: %v", req, err) - } - v3c.logger.Debugf("ADS request sent: %v", req) - return nil -} - -// RecvResponse blocks on the receipt of one response message on the provided -// stream. 
-func (v3c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) { - stream, ok := s.(adsStream) - if !ok { - return nil, fmt.Errorf("xds: Attempt to receive response on unsupported stream type: %T", s) - } - - resp, err := stream.Recv() - if err != nil { - // TODO: call watch callbacks with error when stream is broken. - return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err) - } - v3c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl()) - v3c.logger.Debugf("ADS response received: %+v", resp) - return resp, nil -} - -func (v3c *client) HandleResponse(r proto.Message) (xdsclient.ResourceType, string, string, error) { - rType := xdsclient.UnknownResource - resp, ok := r.(*v3discoverypb.DiscoveryResponse) - if !ok { - return rType, "", "", fmt.Errorf("xds: unsupported message type: %T", resp) - } - - // Note that the xDS transport protocol is versioned independently of - // the resource types, and it is supported to transfer older versions - // of resource types using new versions of the transport protocol, or - // vice-versa. Hence we need to handle v3 type_urls as well here. - var err error - url := resp.GetTypeUrl() - switch { - case xdsclient.IsListenerResource(url): - err = v3c.handleLDSResponse(resp) - rType = xdsclient.ListenerResource - case xdsclient.IsRouteConfigResource(url): - err = v3c.handleRDSResponse(resp) - rType = xdsclient.RouteConfigResource - case xdsclient.IsClusterResource(url): - err = v3c.handleCDSResponse(resp) - rType = xdsclient.ClusterResource - case xdsclient.IsEndpointsResource(url): - err = v3c.handleEDSResponse(resp) - rType = xdsclient.EndpointsResource - default: - return rType, "", "", xdsclient.ErrResourceTypeUnsupported{ - ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", resp.GetTypeUrl()), - } - } - return rType, resp.GetVersionInfo(), resp.GetNonce(), err -} - -// handleLDSResponse processes an LDS response received from the management -// server. 
On receipt of a good response, it also invokes the registered watcher -// callback. -func (v3c *client) handleLDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalListener(resp.GetVersionInfo(), resp.GetResources(), v3c.logger) - v3c.parent.NewListeners(update, md) - return err -} - -// handleRDSResponse processes an RDS response received from the management -// server. On receipt of a good response, it caches validated resources and also -// invokes the registered watcher callback. -func (v3c *client) handleRDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalRouteConfig(resp.GetVersionInfo(), resp.GetResources(), v3c.logger) - v3c.parent.NewRouteConfigs(update, md) - return err -} - -// handleCDSResponse processes an CDS response received from the management -// server. On receipt of a good response, it also invokes the registered watcher -// callback. -func (v3c *client) handleCDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalCluster(resp.GetVersionInfo(), resp.GetResources(), v3c.logger) - v3c.parent.NewClusters(update, md) - return err -} - -func (v3c *client) handleEDSResponse(resp *v3discoverypb.DiscoveryResponse) error { - update, md, err := xdsclient.UnmarshalEndpoints(resp.GetVersionInfo(), resp.GetResources(), v3c.logger) - v3c.parent.NewEndpoints(update, md) - return err -} diff --git a/xds/internal/client/v3/loadreport.go b/xds/internal/client/v3/loadreport.go deleted file mode 100644 index 74e18632aa07..000000000000 --- a/xds/internal/client/v3/loadreport.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package v3 - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - "google.golang.org/grpc/xds/internal/client/load" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" - lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/xds/internal" -) - -const clientFeatureLRSSendAllClusters = "envoy.lrs.supports_send_all_clusters" - -type lrsStream lrsgrpc.LoadReportingService_StreamLoadStatsClient - -func (v3c *client) NewLoadStatsStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) { - c := lrsgrpc.NewLoadReportingServiceClient(cc) - return c.StreamLoadStats(ctx) -} - -func (v3c *client) SendFirstLoadStatsRequest(s grpc.ClientStream) error { - stream, ok := s.(lrsStream) - if !ok { - return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) - } - node := proto.Clone(v3c.nodeProto).(*v3corepb.Node) - if node == nil { - node = &v3corepb.Node{} - } - node.ClientFeatures = append(node.ClientFeatures, clientFeatureLRSSendAllClusters) - - req := &lrspb.LoadStatsRequest{Node: node} - v3c.logger.Infof("lrs: sending init LoadStatsRequest: %v", req) - return stream.Send(req) -} - -func (v3c 
*client) HandleLoadStatsResponse(s grpc.ClientStream) ([]string, time.Duration, error) { - stream, ok := s.(lrsStream) - if !ok { - return nil, 0, fmt.Errorf("lrs: Attempt to receive response on unsupported stream type: %T", s) - } - - resp, err := stream.Recv() - if err != nil { - return nil, 0, fmt.Errorf("lrs: failed to receive first response: %v", err) - } - v3c.logger.Infof("lrs: received first LoadStatsResponse: %+v", resp) - - interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) - if err != nil { - return nil, 0, fmt.Errorf("lrs: failed to convert report interval: %v", err) - } - - if resp.ReportEndpointGranularity { - // TODO: fixme to support per endpoint loads. - return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation") - } - - clusters := resp.Clusters - if resp.SendAllClusters { - // Return nil to send stats for all clusters. - clusters = nil - } - - return clusters, interval, nil -} - -func (v3c *client) SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) error { - stream, ok := s.(lrsStream) - if !ok { - return fmt.Errorf("lrs: Attempt to send request on unsupported stream type: %T", s) - } - - var clusterStats []*v3endpointpb.ClusterStats - for _, sd := range loads { - var ( - droppedReqs []*v3endpointpb.ClusterStats_DroppedRequests - localityStats []*v3endpointpb.UpstreamLocalityStats - ) - for category, count := range sd.Drops { - droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ - Category: category, - DroppedCount: count, - }) - } - for l, localityData := range sd.LocalityStats { - lid, err := internal.LocalityIDFromString(l) - if err != nil { - return err - } - var loadMetricStats []*v3endpointpb.EndpointLoadMetricStats - for name, loadData := range localityData.LoadStats { - loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ - MetricName: name, - NumRequestsFinishedWithMetric: loadData.Count, - TotalMetricValue: 
loadData.Sum, - }) - } - localityStats = append(localityStats, &v3endpointpb.UpstreamLocalityStats{ - Locality: &v3corepb.Locality{ - Region: lid.Region, - Zone: lid.Zone, - SubZone: lid.SubZone, - }, - TotalSuccessfulRequests: localityData.RequestStats.Succeeded, - TotalRequestsInProgress: localityData.RequestStats.InProgress, - TotalErrorRequests: localityData.RequestStats.Errored, - LoadMetricStats: loadMetricStats, - UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. - }) - } - - clusterStats = append(clusterStats, &v3endpointpb.ClusterStats{ - ClusterName: sd.Cluster, - ClusterServiceName: sd.Service, - UpstreamLocalityStats: localityStats, - TotalDroppedRequests: sd.TotalDrops, - DroppedRequests: droppedReqs, - LoadReportInterval: ptypes.DurationProto(sd.ReportInterval), - }) - } - - req := &lrspb.LoadStatsRequest{ClusterStats: clusterStats} - v3c.logger.Infof("lrs: sending LRS loads: %+v", req) - return stream.Send(req) -} diff --git a/xds/internal/client/watchers.go b/xds/internal/client/watchers.go deleted file mode 100644 index 9fafe5a60f83..000000000000 --- a/xds/internal/client/watchers.go +++ /dev/null @@ -1,299 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package client - -import ( - "fmt" - "sync" - "time" -) - -type watchInfoState int - -const ( - watchInfoStateStarted watchInfoState = iota - watchInfoStateRespReceived - watchInfoStateTimeout - watchInfoStateCanceled -) - -// watchInfo holds all the information from a watch() call. -type watchInfo struct { - c *clientImpl - rType ResourceType - target string - - ldsCallback func(ListenerUpdate, error) - rdsCallback func(RouteConfigUpdate, error) - cdsCallback func(ClusterUpdate, error) - edsCallback func(EndpointsUpdate, error) - - expiryTimer *time.Timer - - // mu protects state, and c.scheduleCallback(). - // - No callback should be scheduled after watchInfo is canceled. - // - No timeout error should be scheduled after watchInfo is resp received. - mu sync.Mutex - state watchInfoState -} - -func (wi *watchInfo) newUpdate(update interface{}) { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.state = watchInfoStateRespReceived - wi.expiryTimer.Stop() - wi.c.scheduleCallback(wi, update, nil) -} - -func (wi *watchInfo) resourceNotFound() { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.state = watchInfoStateRespReceived - wi.expiryTimer.Stop() - wi.sendErrorLocked(NewErrorf(ErrorTypeResourceNotFound, "xds: %v target %s not found in received response", wi.rType, wi.target)) -} - -func (wi *watchInfo) timeout() { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled || wi.state == watchInfoStateRespReceived { - return - } - wi.state = watchInfoStateTimeout - wi.sendErrorLocked(fmt.Errorf("xds: %v target %s not found, watcher timeout", wi.rType, wi.target)) -} - -// Caller must hold wi.mu. 
-func (wi *watchInfo) sendErrorLocked(err error) { - var ( - u interface{} - ) - switch wi.rType { - case ListenerResource: - u = ListenerUpdate{} - case RouteConfigResource: - u = RouteConfigUpdate{} - case ClusterResource: - u = ClusterUpdate{} - case EndpointsResource: - u = EndpointsUpdate{} - } - wi.c.scheduleCallback(wi, u, err) -} - -func (wi *watchInfo) cancel() { - wi.mu.Lock() - defer wi.mu.Unlock() - if wi.state == watchInfoStateCanceled { - return - } - wi.expiryTimer.Stop() - wi.state = watchInfoStateCanceled -} - -func (c *clientImpl) watch(wi *watchInfo) (cancel func()) { - c.mu.Lock() - defer c.mu.Unlock() - c.logger.Debugf("new watch for type %v, resource name %v", wi.rType, wi.target) - var ( - watchers map[string]map[*watchInfo]bool - mds map[string]UpdateMetadata - ) - switch wi.rType { - case ListenerResource: - watchers = c.ldsWatchers - mds = c.ldsMD - case RouteConfigResource: - watchers = c.rdsWatchers - mds = c.rdsMD - case ClusterResource: - watchers = c.cdsWatchers - mds = c.cdsMD - case EndpointsResource: - watchers = c.edsWatchers - mds = c.edsMD - default: - c.logger.Errorf("unknown watch type: %v", wi.rType) - return nil - } - - resourceName := wi.target - s, ok := watchers[wi.target] - if !ok { - // If this is a new watcher, will ask lower level to send a new request - // with the resource name. - // - // If this (type+name) is already being watched, will not notify the - // underlying versioned apiClient. - c.logger.Debugf("first watch for type %v, resource name %v, will send a new xDS request", wi.rType, wi.target) - s = make(map[*watchInfo]bool) - watchers[resourceName] = s - mds[resourceName] = UpdateMetadata{Status: ServiceStatusRequested} - c.apiClient.AddWatch(wi.rType, resourceName) - } - // No matter what, add the new watcher to the set, so it's callback will be - // call for new responses. - s[wi] = true - - // If the resource is in cache, call the callback with the value. 
- switch wi.rType { - case ListenerResource: - if v, ok := c.ldsCache[resourceName]; ok { - c.logger.Debugf("LDS resource with name %v found in cache: %+v", wi.target, v) - wi.newUpdate(v) - } - case RouteConfigResource: - if v, ok := c.rdsCache[resourceName]; ok { - c.logger.Debugf("RDS resource with name %v found in cache: %+v", wi.target, v) - wi.newUpdate(v) - } - case ClusterResource: - if v, ok := c.cdsCache[resourceName]; ok { - c.logger.Debugf("CDS resource with name %v found in cache: %+v", wi.target, v) - wi.newUpdate(v) - } - case EndpointsResource: - if v, ok := c.edsCache[resourceName]; ok { - c.logger.Debugf("EDS resource with name %v found in cache: %+v", wi.target, v) - wi.newUpdate(v) - } - } - - return func() { - c.logger.Debugf("watch for type %v, resource name %v canceled", wi.rType, wi.target) - wi.cancel() - c.mu.Lock() - defer c.mu.Unlock() - if s := watchers[resourceName]; s != nil { - // Remove this watcher, so it's callback will not be called in the - // future. - delete(s, wi) - if len(s) == 0 { - c.logger.Debugf("last watch for type %v, resource name %v canceled, will send a new xDS request", wi.rType, wi.target) - // If this was the last watcher, also tell xdsv2Client to stop - // watching this resource. - delete(watchers, resourceName) - delete(mds, resourceName) - c.apiClient.RemoveWatch(wi.rType, resourceName) - // Remove the resource from cache. When a watch for this - // resource is added later, it will trigger a xDS request with - // resource names, and client will receive new xDS responses. - switch wi.rType { - case ListenerResource: - delete(c.ldsCache, resourceName) - case RouteConfigResource: - delete(c.rdsCache, resourceName) - case ClusterResource: - delete(c.cdsCache, resourceName) - case EndpointsResource: - delete(c.edsCache, resourceName) - } - } - } - } -} - -// WatchListener uses LDS to discover information about the provided listener. -// -// Note that during race (e.g. 
an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. -func (c *clientImpl) WatchListener(serviceName string, cb func(ListenerUpdate, error)) (cancel func()) { - wi := &watchInfo{ - c: c, - rType: ListenerResource, - target: serviceName, - ldsCallback: cb, - } - - wi.expiryTimer = time.AfterFunc(c.watchExpiryTimeout, func() { - wi.timeout() - }) - return c.watch(wi) -} - -// WatchRouteConfig starts a listener watcher for the service.. -// -// Note that during race (e.g. an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. -func (c *clientImpl) WatchRouteConfig(routeName string, cb func(RouteConfigUpdate, error)) (cancel func()) { - wi := &watchInfo{ - c: c, - rType: RouteConfigResource, - target: routeName, - rdsCallback: cb, - } - - wi.expiryTimer = time.AfterFunc(c.watchExpiryTimeout, func() { - wi.timeout() - }) - return c.watch(wi) -} - -// WatchCluster uses CDS to discover information about the provided -// clusterName. -// -// WatchCluster can be called multiple times, with same or different -// clusterNames. Each call will start an independent watcher for the resource. -// -// Note that during race (e.g. an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. 
-func (c *clientImpl) WatchCluster(clusterName string, cb func(ClusterUpdate, error)) (cancel func()) { - wi := &watchInfo{ - c: c, - rType: ClusterResource, - target: clusterName, - cdsCallback: cb, - } - - wi.expiryTimer = time.AfterFunc(c.watchExpiryTimeout, func() { - wi.timeout() - }) - return c.watch(wi) -} - -// WatchEndpoints uses EDS to discover endpoints in the provided clusterName. -// -// WatchEndpoints can be called multiple times, with same or different -// clusterNames. Each call will start an independent watcher for the resource. -// -// Note that during race (e.g. an xDS response is received while the user is -// calling cancel()), there's a small window where the callback can be called -// after the watcher is canceled. The caller needs to handle this case. -func (c *clientImpl) WatchEndpoints(clusterName string, cb func(EndpointsUpdate, error)) (cancel func()) { - wi := &watchInfo{ - c: c, - rType: EndpointsResource, - target: clusterName, - edsCallback: cb, - } - - wi.expiryTimer = time.AfterFunc(c.watchExpiryTimeout, func() { - wi.timeout() - }) - return c.watch(wi) -} diff --git a/xds/internal/client/watchers_cluster_test.go b/xds/internal/client/watchers_cluster_test.go deleted file mode 100644 index fdef0cf61649..000000000000 --- a/xds/internal/client/watchers_cluster_test.go +++ /dev/null @@ -1,443 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package client - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - - "google.golang.org/grpc/internal/testutils" -) - -type clusterUpdateErr struct { - u ClusterUpdate - err error -} - -// TestClusterWatch covers the cases: -// - an update is received after a watch() -// - an update for another resource name -// - an update is received after cancel() -func (s) TestClusterWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - clusterUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := ClusterUpdate{ServiceName: testEDSName} - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Another update, with an extra resource for a different resource name. - client.NewClusters(map[string]ClusterUpdate{ - testCDSName: wantUpdate, - "randomName": {}, - }, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Cancel watch, and send update again. 
- cancelWatch() - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestClusterTwoWatchSameResourceName covers the case where an update is received -// after two watch() for the same resource name. -func (s) TestClusterTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - var clusterUpdateChs []*testutils.Channel - var cancelLastWatch func() - const count = 2 - for i := 0; i < count; i++ { - clusterUpdateCh := testutils.NewChannel() - clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh) - cancelLastWatch = client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. 
- if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - wantUpdate := ClusterUpdate{ServiceName: testEDSName} - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - for i := 0; i < count; i++ { - if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate); err != nil { - t.Fatal(err) - } - } - - // Cancel the last watch, and send update again. - cancelLastWatch() - client.NewClusters(map[string]ClusterUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - for i := 0; i < count-1; i++ { - if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate); err != nil { - t.Fatal(err) - } - } - - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := clusterUpdateChs[count-1].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestClusterThreeWatchDifferentResourceName covers the case where an update is -// received after three watch() for different resource names. -func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - // Two watches for the same name. 
- var clusterUpdateChs []*testutils.Channel - const count = 2 - for i := 0; i < count; i++ { - clusterUpdateCh := testutils.NewChannel() - clusterUpdateChs = append(clusterUpdateChs, clusterUpdateCh) - client.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - // Third watch for a different name. - clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := ClusterUpdate{ServiceName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} - client.NewClusters(map[string]ClusterUpdate{ - testCDSName + "1": wantUpdate1, - testCDSName + "2": wantUpdate2, - }, UpdateMetadata{}) - - for i := 0; i < count; i++ { - if err := verifyClusterUpdate(ctx, clusterUpdateChs[i], wantUpdate1); err != nil { - t.Fatal(err) - } - } - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2); err != nil { - t.Fatal(err) - } -} - -// TestClusterWatchAfterCache covers the case where watch is called after the update -// is in cache. 
-func (s) TestClusterWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := ClusterUpdate{ServiceName: testEDSName} - client.NewClusters(map[string]ClusterUpdate{ - testCDSName: wantUpdate, - }, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Another watch for the resource in cache. - clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName, func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err}) - }) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if n, err := apiClient.addWatches[ClusterResource].Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) - } - - // New watch should receives the update. - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate); err != nil { - t.Fatal(err) - } - - // Old watch should see nothing. 
- sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestClusterWatchExpiryTimer tests the case where the client does not receive -// an CDS response for the request that it sends out. We want the watch callback -// to be invoked with an error once the watchExpiryTimer fires. -func (s) TestClusterWatchExpiryTimer(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, true)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(u ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: u, err: err}) - }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - u, err := clusterUpdateCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for cluster update: %v", err) - } - gotUpdate := u.(clusterUpdateErr) - if gotUpdate.err == nil || !cmp.Equal(gotUpdate.u, ClusterUpdate{}) { - t.Fatalf("unexpected clusterUpdate: (%v, %v), want: (ClusterUpdate{}, nil)", gotUpdate.u, gotUpdate.err) - } -} - -// TestClusterWatchExpiryTimerStop tests the case where the client does receive -// an CDS response for the request that it sends out. We want no error even -// after expiry timeout. 
-func (s) TestClusterWatchExpiryTimerStop(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, true)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - clusterUpdateCh := testutils.NewChannel() - client.WatchCluster(testCDSName, func(u ClusterUpdate, err error) { - clusterUpdateCh.Send(clusterUpdateErr{u: u, err: err}) - }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := ClusterUpdate{ServiceName: testEDSName} - client.NewClusters(map[string]ClusterUpdate{ - testCDSName: wantUpdate, - }, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Wait for an error, the error should never happen. 
- sCtx, sCancel := context.WithTimeout(ctx, defaultTestWatchExpiryTimeout) - defer sCancel() - if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestClusterResourceRemoved covers the cases: -// - an update is received after a watch() -// - another update is received, with one resource removed -// - this should trigger callback with resource removed error -// - one more update without the removed resource -// - the callback (above) shouldn't receive any update -func (s) TestClusterResourceRemoved(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - clusterUpdateCh1 := testutils.NewChannel() - client.WatchCluster(testCDSName+"1", func(update ClusterUpdate, err error) { - clusterUpdateCh1.Send(clusterUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - // Another watch for a different name. 
- clusterUpdateCh2 := testutils.NewChannel() - client.WatchCluster(testCDSName+"2", func(update ClusterUpdate, err error) { - clusterUpdateCh2.Send(clusterUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[ClusterResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := ClusterUpdate{ServiceName: testEDSName + "1"} - wantUpdate2 := ClusterUpdate{ServiceName: testEDSName + "2"} - client.NewClusters(map[string]ClusterUpdate{ - testCDSName + "1": wantUpdate1, - testCDSName + "2": wantUpdate2, - }, UpdateMetadata{}) - if err := verifyClusterUpdate(ctx, clusterUpdateCh1, wantUpdate1); err != nil { - t.Fatal(err) - } - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2); err != nil { - t.Fatal(err) - } - - // Send another update to remove resource 1. - client.NewClusters(map[string]ClusterUpdate{testCDSName + "2": wantUpdate2}, UpdateMetadata{}) - - // Watcher 1 should get an error. - if u, err := clusterUpdateCh1.Receive(ctx); err != nil || ErrType(u.(clusterUpdateErr).err) != ErrorTypeResourceNotFound { - t.Errorf("unexpected clusterUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) - } - - // Watcher 2 should get the same update again. - if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2); err != nil { - t.Fatal(err) - } - - // Send one more update without resource 1. - client.NewClusters(map[string]ClusterUpdate{testCDSName + "2": wantUpdate2}, UpdateMetadata{}) - - // Watcher 1 should not see an update. - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := clusterUpdateCh1.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err) - } - - // Watcher 2 should get the same update again. 
- if err := verifyClusterUpdate(ctx, clusterUpdateCh2, wantUpdate2); err != nil { - t.Fatal(err) - } -} diff --git a/xds/internal/client/watchers_endpoints_test.go b/xds/internal/client/watchers_endpoints_test.go deleted file mode 100644 index b79397414d4a..000000000000 --- a/xds/internal/client/watchers_endpoints_test.go +++ /dev/null @@ -1,332 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package client - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - - "google.golang.org/grpc/internal/testutils" - "google.golang.org/grpc/xds/internal" -) - -var ( - testLocalities = []Locality{ - { - Endpoints: []Endpoint{{Address: "addr1:314"}}, - ID: internal.LocalityID{SubZone: "locality-1"}, - Priority: 1, - Weight: 1, - }, - { - Endpoints: []Endpoint{{Address: "addr2:159"}}, - ID: internal.LocalityID{SubZone: "locality-2"}, - Priority: 0, - Weight: 1, - }, - } -) - -type endpointsUpdateErr struct { - u EndpointsUpdate - err error -} - -// TestEndpointsWatch covers the cases: -// - an update is received after a watch() -// - an update for another resource name (which doesn't trigger callback) -// - an update is received after cancel() -func (s) TestEndpointsWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", 
err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - endpointsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Another update for a different resource name. - client.NewEndpoints(map[string]EndpointsUpdate{"randomName": {}}, UpdateMetadata{}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) - } - - // Cancel watch, and send update again. - cancelWatch() - client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestEndpointsTwoWatchSameResourceName covers the case where an update is received -// after two watch() for the same resource name. 
-func (s) TestEndpointsTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - const count = 2 - var ( - endpointsUpdateChs []*testutils.Channel - cancelLastWatch func() - ) - for i := 0; i < count; i++ { - endpointsUpdateCh := testutils.NewChannel() - endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh) - cancelLastWatch = client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - for i := 0; i < count; i++ { - if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate); err != nil { - t.Fatal(err) - } - } - - // Cancel the last watch, and send update again. 
- cancelLastWatch() - client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - for i := 0; i < count-1; i++ { - if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate); err != nil { - t.Fatal(err) - } - } - - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := endpointsUpdateChs[count-1].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestEndpointsThreeWatchDifferentResourceName covers the case where an update is -// received after three watch() for different resource names. -func (s) TestEndpointsThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - // Two watches for the same name. - var endpointsUpdateChs []*testutils.Channel - const count = 2 - for i := 0; i < count; i++ { - endpointsUpdateCh := testutils.NewChannel() - endpointsUpdateChs = append(endpointsUpdateChs, endpointsUpdateCh) - client.WatchEndpoints(testCDSName+"1", func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - // Third watch for a different name. 
- endpointsUpdateCh2 := testutils.NewChannel() - client.WatchEndpoints(testCDSName+"2", func(update EndpointsUpdate, err error) { - endpointsUpdateCh2.Send(endpointsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - wantUpdate2 := EndpointsUpdate{Localities: []Locality{testLocalities[1]}} - client.NewEndpoints(map[string]EndpointsUpdate{ - testCDSName + "1": wantUpdate1, - testCDSName + "2": wantUpdate2, - }, UpdateMetadata{}) - - for i := 0; i < count; i++ { - if err := verifyEndpointsUpdate(ctx, endpointsUpdateChs[i], wantUpdate1); err != nil { - t.Fatal(err) - } - } - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh2, wantUpdate2); err != nil { - t.Fatal(err) - } -} - -// TestEndpointsWatchAfterCache covers the case where watch is called after the update -// is in cache. -func (s) TestEndpointsWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - endpointsUpdateCh := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := EndpointsUpdate{Localities: []Locality{testLocalities[0]}} - 
client.NewEndpoints(map[string]EndpointsUpdate{testCDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Another watch for the resource in cache. - endpointsUpdateCh2 := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh2.Send(endpointsUpdateErr{u: update, err: err}) - }) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if n, err := apiClient.addWatches[EndpointsResource].Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) - } - - // New watch should receives the update. - if err := verifyEndpointsUpdate(ctx, endpointsUpdateCh2, wantUpdate); err != nil { - t.Fatal(err) - } - - // Old watch should see nothing. - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := endpointsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected endpointsUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestEndpointsWatchExpiryTimer tests the case where the client does not receive -// an CDS response for the request that it sends out. We want the watch callback -// to be invoked with an error once the watchExpiryTimer fires. 
-func (s) TestEndpointsWatchExpiryTimer(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, true)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - endpointsUpdateCh := testutils.NewChannel() - client.WatchEndpoints(testCDSName, func(update EndpointsUpdate, err error) { - endpointsUpdateCh.Send(endpointsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[EndpointsResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - u, err := endpointsUpdateCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for endpoints update: %v", err) - } - gotUpdate := u.(endpointsUpdateErr) - if gotUpdate.err == nil || !cmp.Equal(gotUpdate.u, EndpointsUpdate{}) { - t.Fatalf("unexpected endpointsUpdate: (%v, %v), want: (EndpointsUpdate{}, nil)", gotUpdate.u, gotUpdate.err) - } -} diff --git a/xds/internal/client/watchers_listener_test.go b/xds/internal/client/watchers_listener_test.go deleted file mode 100644 index bf3a122da075..000000000000 --- a/xds/internal/client/watchers_listener_test.go +++ /dev/null @@ -1,358 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package client - -import ( - "context" - "testing" - - "google.golang.org/grpc/internal/testutils" -) - -type ldsUpdateErr struct { - u ListenerUpdate - err error -} - -// TestLDSWatch covers the cases: -// - an update is received after a watch() -// - an update for another resource name -// - an update is received after cancel() -func (s) TestLDSWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - ldsUpdateCh := testutils.NewChannel() - cancelWatch := client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Another update, with an extra resource for a different resource name. - client.NewListeners(map[string]ListenerUpdate{ - testLDSName: wantUpdate, - "randomName": {}, - }, UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Cancel watch, and send update again. 
- cancelWatch() - client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestLDSTwoWatchSameResourceName covers the case where an update is received -// after two watch() for the same resource name. -func (s) TestLDSTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - const count = 2 - var ( - ldsUpdateChs []*testutils.Channel - cancelLastWatch func() - ) - - for i := 0; i < count; i++ { - ldsUpdateCh := testutils.NewChannel() - ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh) - cancelLastWatch = client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. 
- if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) - for i := 0; i < count; i++ { - if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate); err != nil { - t.Fatal(err) - } - } - - // Cancel the last watch, and send update again. - cancelLastWatch() - client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) - for i := 0; i < count-1; i++ { - if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate); err != nil { - t.Fatal(err) - } - } - - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateChs[count-1].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestLDSThreeWatchDifferentResourceName covers the case where an update is -// received after three watch() for different resource names. -func (s) TestLDSThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - var ldsUpdateChs []*testutils.Channel - const count = 2 - - // Two watches for the same name. 
- for i := 0; i < count; i++ { - ldsUpdateCh := testutils.NewChannel() - ldsUpdateChs = append(ldsUpdateChs, ldsUpdateCh) - client.WatchListener(testLDSName+"1", func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - // Third watch for a different name. - ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName+"2", func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := ListenerUpdate{RouteConfigName: testRDSName + "1"} - wantUpdate2 := ListenerUpdate{RouteConfigName: testRDSName + "2"} - client.NewListeners(map[string]ListenerUpdate{ - testLDSName + "1": wantUpdate1, - testLDSName + "2": wantUpdate2, - }, UpdateMetadata{}) - - for i := 0; i < count; i++ { - if err := verifyListenerUpdate(ctx, ldsUpdateChs[i], wantUpdate1); err != nil { - t.Fatal(err) - } - } - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2); err != nil { - t.Fatal(err) - } -} - -// TestLDSWatchAfterCache covers the case where watch is called after the update -// is in cache. 
-func (s) TestLDSWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - ldsUpdateCh := testutils.NewChannel() - client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh.Send(ldsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := ListenerUpdate{RouteConfigName: testRDSName} - client.NewListeners(map[string]ListenerUpdate{testLDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Another watch for the resource in cache. - ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName, func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err}) - }) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if n, err := apiClient.addWatches[ListenerResource].Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) - } - - // New watch should receive the update. - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate); err != nil { - t.Fatal(err) - } - - // Old watch should see nothing. 
- sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestLDSResourceRemoved covers the cases: -// - an update is received after a watch() -// - another update is received, with one resource removed -// - this should trigger callback with resource removed error -// - one more update without the removed resource -// - the callback (above) shouldn't receive any update -func (s) TestLDSResourceRemoved(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - ldsUpdateCh1 := testutils.NewChannel() - client.WatchListener(testLDSName+"1", func(update ListenerUpdate, err error) { - ldsUpdateCh1.Send(ldsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - // Another watch for a different name. 
- ldsUpdateCh2 := testutils.NewChannel() - client.WatchListener(testLDSName+"2", func(update ListenerUpdate, err error) { - ldsUpdateCh2.Send(ldsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[ListenerResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := ListenerUpdate{RouteConfigName: testEDSName + "1"} - wantUpdate2 := ListenerUpdate{RouteConfigName: testEDSName + "2"} - client.NewListeners(map[string]ListenerUpdate{ - testLDSName + "1": wantUpdate1, - testLDSName + "2": wantUpdate2, - }, UpdateMetadata{}) - if err := verifyListenerUpdate(ctx, ldsUpdateCh1, wantUpdate1); err != nil { - t.Fatal(err) - } - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2); err != nil { - t.Fatal(err) - } - - // Send another update to remove resource 1. - client.NewListeners(map[string]ListenerUpdate{testLDSName + "2": wantUpdate2}, UpdateMetadata{}) - - // Watcher 1 should get an error. - if u, err := ldsUpdateCh1.Receive(ctx); err != nil || ErrType(u.(ldsUpdateErr).err) != ErrorTypeResourceNotFound { - t.Errorf("unexpected ListenerUpdate: %v, error receiving from channel: %v, want update with error resource not found", u, err) - } - - // Watcher 2 should get the same update again. - if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2); err != nil { - t.Fatal(err) - } - - // Send one more update without resource 1. - client.NewListeners(map[string]ListenerUpdate{testLDSName + "2": wantUpdate2}, UpdateMetadata{}) - - // Watcher 1 should not see an update. - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := ldsUpdateCh1.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected ListenerUpdate: %v, want receiving from channel timeout", u) - } - - // Watcher 2 should get the same update again. 
- if err := verifyListenerUpdate(ctx, ldsUpdateCh2, wantUpdate2); err != nil { - t.Fatal(err) - } -} diff --git a/xds/internal/client/watchers_route_test.go b/xds/internal/client/watchers_route_test.go deleted file mode 100644 index 5f44e5493330..000000000000 --- a/xds/internal/client/watchers_route_test.go +++ /dev/null @@ -1,310 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package client - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - - "google.golang.org/grpc/internal/testutils" -) - -type rdsUpdateErr struct { - u RouteConfigUpdate - err error -} - -// TestRDSWatch covers the cases: -// - an update is received after a watch() -// - an update for another resource name (which doesn't trigger callback) -// - an update is received after cancel() -func (s) TestRDSWatch(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - rdsUpdateCh := testutils.NewChannel() - cancelWatch := 
client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, - }, - }, - } - client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Another update for a different resource name. - client.NewRouteConfigs(map[string]RouteConfigUpdate{"randomName": {}}, UpdateMetadata{}) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) - } - - // Cancel watch, and send update again. - cancelWatch() - client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestRDSTwoWatchSameResourceName covers the case where an update is received -// after two watch() for the same resource name. 
-func (s) TestRDSTwoWatchSameResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - const count = 2 - var ( - rdsUpdateChs []*testutils.Channel - cancelLastWatch func() - ) - for i := 0; i < count; i++ { - rdsUpdateCh := testutils.NewChannel() - rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh) - cancelLastWatch = client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - wantUpdate := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, - }, - }, - } - client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) - for i := 0; i < count; i++ { - if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate); err != nil { - t.Fatal(err) - } - } - - // Cancel the last watch, and send update again. 
- cancelLastWatch() - client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) - for i := 0; i < count-1; i++ { - if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate); err != nil { - t.Fatal(err) - } - } - - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := rdsUpdateChs[count-1].Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) - } -} - -// TestRDSThreeWatchDifferentResourceName covers the case where an update is -// received after three watch() for different resource names. -func (s) TestRDSThreeWatchDifferentResourceName(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - // Two watches for the same name. - var rdsUpdateChs []*testutils.Channel - const count = 2 - for i := 0; i < count; i++ { - rdsUpdateCh := testutils.NewChannel() - rdsUpdateChs = append(rdsUpdateChs, rdsUpdateCh) - client.WatchRouteConfig(testRDSName+"1", func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err}) - }) - - if i == 0 { - // A new watch is registered on the underlying API client only for - // the first iteration because we are using the same resource name. - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - } - } - - // Third watch for a different name. 
- rdsUpdateCh2 := testutils.NewChannel() - client.WatchRouteConfig(testRDSName+"2", func(update RouteConfigUpdate, err error) { - rdsUpdateCh2.Send(rdsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate1 := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName + "1": {Weight: 1}}}}, - }, - }, - } - wantUpdate2 := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName + "2": {Weight: 1}}}}, - }, - }, - } - client.NewRouteConfigs(map[string]RouteConfigUpdate{ - testRDSName + "1": wantUpdate1, - testRDSName + "2": wantUpdate2, - }, UpdateMetadata{}) - - for i := 0; i < count; i++ { - if err := verifyRouteConfigUpdate(ctx, rdsUpdateChs[i], wantUpdate1); err != nil { - t.Fatal(err) - } - } - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh2, wantUpdate2); err != nil { - t.Fatal(err) - } -} - -// TestRDSWatchAfterCache covers the case where watch is called after the update -// is in cache. 
-func (s) TestRDSWatchAfterCache(t *testing.T) { - apiClientCh, cleanup := overrideNewAPIClient() - defer cleanup() - - client, err := newWithConfig(clientOpts(testXDSServer, false)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } - defer client.Close() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - c, err := apiClientCh.Receive(ctx) - if err != nil { - t.Fatalf("timeout when waiting for API client to be created: %v", err) - } - apiClient := c.(*testAPIClient) - - rdsUpdateCh := testutils.NewChannel() - client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh.Send(rdsUpdateErr{u: update, err: err}) - }) - if _, err := apiClient.addWatches[RouteConfigResource].Receive(ctx); err != nil { - t.Fatalf("want new watch to start, got error %v", err) - } - - wantUpdate := RouteConfigUpdate{ - VirtualHosts: []*VirtualHost{ - { - Domains: []string{testLDSName}, - Routes: []*Route{{Prefix: newStringP(""), WeightedClusters: map[string]WeightedCluster{testCDSName: {Weight: 1}}}}, - }, - }, - } - client.NewRouteConfigs(map[string]RouteConfigUpdate{testRDSName: wantUpdate}, UpdateMetadata{}) - if err := verifyRouteConfigUpdate(ctx, rdsUpdateCh, wantUpdate); err != nil { - t.Fatal(err) - } - - // Another watch for the resource in cache. - rdsUpdateCh2 := testutils.NewChannel() - client.WatchRouteConfig(testRDSName, func(update RouteConfigUpdate, err error) { - rdsUpdateCh2.Send(rdsUpdateErr{u: update, err: err}) - }) - sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if n, err := apiClient.addWatches[RouteConfigResource].Receive(sCtx); err != context.DeadlineExceeded { - t.Fatalf("want no new watch to start (recv timeout), got resource name: %v error %v", n, err) - } - - // New watch should receives the update. 
- if u, err := rdsUpdateCh2.Receive(ctx); err != nil || !cmp.Equal(u, rdsUpdateErr{wantUpdate, nil}, cmp.AllowUnexported(rdsUpdateErr{})) { - t.Errorf("unexpected RouteConfigUpdate: %v, error receiving from channel: %v", u, err) - } - - // Old watch should see nothing. - sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) - defer sCancel() - if u, err := rdsUpdateCh.Receive(sCtx); err != context.DeadlineExceeded { - t.Errorf("unexpected RouteConfigUpdate: %v, %v, want channel recv timeout", u, err) - } -} diff --git a/xds/internal/client/xds.go b/xds/internal/client/xds.go deleted file mode 100644 index 2791603ce26e..000000000000 --- a/xds/internal/client/xds.go +++ /dev/null @@ -1,979 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package client - -import ( - "errors" - "fmt" - "net" - "strconv" - "strings" - "time" - - v1typepb "github.com/cncf/udpa/go/udpa/type/v1" - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - "google.golang.org/protobuf/types/known/anypb" - - "google.golang.org/grpc/internal/grpclog" - "google.golang.org/grpc/internal/xds" - "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal" - "google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/version" -) - -// TransportSocket proto message has a `name` field which is expected to be set -// to this value by the management server. -const transportSocketName = "envoy.transport_sockets.tls" - -// UnmarshalListener processes resources received in an LDS response, validates -// them, and transforms them into a native struct which contains only fields we -// are interested in. 
-func UnmarshalListener(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ListenerUpdate, UpdateMetadata, error) { - update := make(map[string]ListenerUpdate) - md, err := processAllResources(version, resources, logger, update) - return update, md, err -} - -func unmarshalListenerResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, ListenerUpdate, error) { - if !IsListenerResource(r.GetTypeUrl()) { - return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) - } - // TODO: Pass version.TransportAPI instead of relying upon the type URL - v2 := r.GetTypeUrl() == version.V2ListenerURL - lis := &v3listenerpb.Listener{} - if err := proto.Unmarshal(r.GetValue(), lis); err != nil { - return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - logger.Infof("Resource with name: %v, type: %T, contains: %v", lis.GetName(), lis, lis) - - lu, err := processListener(lis, v2) - if err != nil { - return lis.GetName(), ListenerUpdate{}, err - } - lu.Raw = r - return lis.GetName(), *lu, nil -} - -func processListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUpdate, error) { - if lis.GetApiListener() != nil { - return processClientSideListener(lis, v2) - } - return processServerSideListener(lis) -} - -// processClientSideListener checks if the provided Listener proto meets -// the expected criteria. If so, it returns a non-empty routeConfigName. 
-func processClientSideListener(lis *v3listenerpb.Listener, v2 bool) (*ListenerUpdate, error) { - update := &ListenerUpdate{} - - apiLisAny := lis.GetApiListener().GetApiListener() - if !IsHTTPConnManagerResource(apiLisAny.GetTypeUrl()) { - return nil, fmt.Errorf("unexpected resource type: %q", apiLisAny.GetTypeUrl()) - } - apiLis := &v3httppb.HttpConnectionManager{} - if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil { - return nil, fmt.Errorf("failed to unmarshal api_listner: %v", err) - } - - switch apiLis.RouteSpecifier.(type) { - case *v3httppb.HttpConnectionManager_Rds: - if apiLis.GetRds().GetConfigSource().GetAds() == nil { - return nil, fmt.Errorf("ConfigSource is not ADS: %+v", lis) - } - name := apiLis.GetRds().GetRouteConfigName() - if name == "" { - return nil, fmt.Errorf("empty route_config_name: %+v", lis) - } - update.RouteConfigName = name - case *v3httppb.HttpConnectionManager_RouteConfig: - // TODO: Add support for specifying the RouteConfiguration inline - // in the LDS response. - return nil, fmt.Errorf("LDS response contains RDS config inline. Not supported for now: %+v", apiLis) - case nil: - return nil, fmt.Errorf("no RouteSpecifier: %+v", apiLis) - default: - return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", apiLis.RouteSpecifier) - } - - if v2 { - return update, nil - } - - // The following checks and fields only apply to xDS protocol versions v3+. - - update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() - - var err error - if update.HTTPFilters, err = processHTTPFilters(apiLis.GetHttpFilters(), false); err != nil { - return nil, err - } - - return update, nil -} - -func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { - // The real type name is inside the TypedStruct. 
- s := new(v1typepb.TypedStruct) - if !ptypes.Is(config, s) { - return config, config.GetTypeUrl(), nil - } - if err := ptypes.UnmarshalAny(config, s); err != nil { - return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) - } - return s, s.GetTypeUrl(), nil -} - -func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Filter, httpfilter.FilterConfig, error) { - config, typeURL, err := unwrapHTTPFilterConfig(cfg) - if err != nil { - return nil, nil, err - } - filterBuilder := httpfilter.Get(typeURL) - if filterBuilder == nil { - if optional { - return nil, nil, nil - } - return nil, nil, fmt.Errorf("no filter implementation found for %q", typeURL) - } - parseFunc := filterBuilder.ParseFilterConfig - if !lds { - parseFunc = filterBuilder.ParseFilterConfigOverride - } - filterConfig, err := parseFunc(config) - if err != nil { - return nil, nil, fmt.Errorf("error parsing config for filter %q: %v", typeURL, err) - } - return filterBuilder, filterConfig, nil -} - -func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilter.FilterConfig, error) { - if !env.FaultInjectionSupport || len(cfgs) == 0 { - return nil, nil - } - m := make(map[string]httpfilter.FilterConfig) - for name, cfg := range cfgs { - optional := false - s := new(v3routepb.FilterConfig) - if ptypes.Is(cfg, s) { - if err := ptypes.UnmarshalAny(cfg, s); err != nil { - return nil, fmt.Errorf("filter override %q: error unmarshalling FilterConfig: %v", name, err) - } - cfg = s.GetConfig() - optional = s.GetIsOptional() - } - - httpFilter, config, err := validateHTTPFilterConfig(cfg, false, optional) - if err != nil { - return nil, fmt.Errorf("filter override %q: %v", name, err) - } - if httpFilter == nil { - // Optional configs are ignored. 
- continue - } - m[name] = config - } - return m, nil -} - -func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilter, error) { - if !env.FaultInjectionSupport { - return nil, nil - } - - ret := make([]HTTPFilter, 0, len(filters)) - seenNames := make(map[string]bool, len(filters)) - for _, filter := range filters { - name := filter.GetName() - if name == "" { - return nil, errors.New("filter missing name field") - } - if seenNames[name] { - return nil, fmt.Errorf("duplicate filter name %q", name) - } - seenNames[name] = true - - httpFilter, config, err := validateHTTPFilterConfig(filter.GetTypedConfig(), true, filter.GetIsOptional()) - if err != nil { - return nil, err - } - if httpFilter == nil { - // Optional configs are ignored. - continue - } - if server { - if _, ok := httpFilter.(httpfilter.ServerInterceptorBuilder); !ok { - if filter.GetIsOptional() { - continue - } - return nil, fmt.Errorf("HTTP filter %q not supported server-side", name) - } - } else if _, ok := httpFilter.(httpfilter.ClientInterceptorBuilder); !ok { - if filter.GetIsOptional() { - continue - } - return nil, fmt.Errorf("HTTP filter %q not supported client-side", name) - } - - // Save name/config - ret = append(ret, HTTPFilter{Name: name, Filter: httpFilter, Config: config}) - } - return ret, nil -} - -func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { - if n := len(lis.ListenerFilters); n != 0 { - return nil, fmt.Errorf("unsupported field 'listener_filters' contains %d entries", n) - } - if useOrigDst := lis.GetUseOriginalDst(); useOrigDst != nil && useOrigDst.GetValue() { - return nil, errors.New("unsupported field 'use_original_dst' is present and set to true") - } - addr := lis.GetAddress() - if addr == nil { - return nil, fmt.Errorf("no address field in LDS response: %+v", lis) - } - sockAddr := addr.GetSocketAddress() - if sockAddr == nil { - return nil, fmt.Errorf("no socket_address field in LDS response: %+v", lis) - } - lu 
:= &ListenerUpdate{ - InboundListenerCfg: &InboundListenerConfig{ - Address: sockAddr.GetAddress(), - Port: strconv.Itoa(int(sockAddr.GetPortValue())), - }, - } - - var filterChains []*FilterChain - for _, fc := range lis.GetFilterChains() { - filterChain, err := getFilterChain(fc) - if err != nil { - return nil, err - } - filterChains = append(filterChains, filterChain) - } - defaultFilterChain, err := getFilterChain(lis.GetDefaultFilterChain()) - if err != nil { - return nil, err - } - if len(filterChains) == 0 && defaultFilterChain == nil { - return nil, fmt.Errorf("xds: no supported filter chains and no default filter chain") - } - lu.InboundListenerCfg.FilterChains = filterChains - lu.InboundListenerCfg.DefaultFilterChain = defaultFilterChain - return lu, nil -} - -// getFilterChain parses the filter chain proto and converts it into the local -// representation. If fc contains unsupported filter chain match fields, a nil -// FilterChain object and a nil error are returned. If fc does not parse or -// contains other invalid data, an non-nil error is returned. -func getFilterChain(fc *v3listenerpb.FilterChain) (*FilterChain, error) { - if fc == nil { - return nil, nil - } - - // If the match criteria contains unsupported fields, skip the filter chain. - fcm := fc.GetFilterChainMatch() - if fcm.GetDestinationPort().GetValue() != 0 || - fcm.GetServerNames() != nil || - (fcm.GetTransportProtocol() != "" && fcm.TransportProtocol != "raw_buffer") || - fcm.GetApplicationProtocols() != nil { - return nil, nil - } - - // Extract the supported match criteria. 
- var dstPrefixRanges []net.IP - for _, pr := range fcm.GetPrefixRanges() { - cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) - ip, _, err := net.ParseCIDR(cidr) - if err != nil { - return nil, fmt.Errorf("xds: failed to parse destination prefix range: %+v", pr) - } - dstPrefixRanges = append(dstPrefixRanges, ip) - } - var srcType SourceType - switch fcm.GetSourceType() { - case v3listenerpb.FilterChainMatch_ANY: - srcType = SourceTypeAny - case v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK: - srcType = SourceTypeSameOrLoopback - case v3listenerpb.FilterChainMatch_EXTERNAL: - srcType = SourceTypeExternal - default: - return nil, fmt.Errorf("xds: unsupported source type: %v", fcm.GetSourceType()) - } - var srcPrefixRanges []net.IP - for _, pr := range fcm.GetSourcePrefixRanges() { - cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) - ip, _, err := net.ParseCIDR(cidr) - if err != nil { - return nil, fmt.Errorf("xds: failed to parse source prefix range: %+v", pr) - } - srcPrefixRanges = append(srcPrefixRanges, ip) - } - filterChain := &FilterChain{ - Match: &FilterChainMatch{ - DestPrefixRanges: dstPrefixRanges, - SourceType: srcType, - SourcePrefixRanges: srcPrefixRanges, - SourcePorts: fcm.GetSourcePorts(), - }, - } - - // If the transport_socket field is not specified, it means that the control - // plane has not sent us any security config. This is fine and the server - // will use the fallback credentials configured as part of the - // xdsCredentials. 
- ts := fc.GetTransportSocket() - if ts == nil { - return filterChain, nil - } - if name := ts.GetName(); name != transportSocketName { - return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) - } - any := ts.GetTypedConfig() - if any == nil || any.TypeUrl != version.V3DownstreamTLSContextURL { - return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl) - } - downstreamCtx := &v3tlspb.DownstreamTlsContext{} - if err := proto.Unmarshal(any.GetValue(), downstreamCtx); err != nil { - return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) - } - if downstreamCtx.GetCommonTlsContext() == nil { - return nil, errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext") - } - sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext()) - if err != nil { - return nil, err - } - if sc.IdentityInstanceName == "" { - return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") - } - sc.RequireClientCert = downstreamCtx.GetRequireClientCertificate().GetValue() - if sc.RequireClientCert && sc.RootInstanceName == "" { - return nil, errors.New("security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set") - } - filterChain.SecurityCfg = sc - return filterChain, nil -} - -// UnmarshalRouteConfig processes resources received in an RDS response, -// validates them, and transforms them into a native struct which contains only -// fields we are interested in. The provided hostname determines the route -// configuration resources of interest. 
-func UnmarshalRouteConfig(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]RouteConfigUpdate, UpdateMetadata, error) { - update := make(map[string]RouteConfigUpdate) - md, err := processAllResources(version, resources, logger, update) - return update, md, err -} - -func unmarshalRouteConfigResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, RouteConfigUpdate, error) { - if !IsRouteConfigResource(r.GetTypeUrl()) { - return "", RouteConfigUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) - } - rc := &v3routepb.RouteConfiguration{} - if err := proto.Unmarshal(r.GetValue(), rc); err != nil { - return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - logger.Infof("Resource with name: %v, type: %T, contains: %v.", rc.GetName(), rc, rc) - - // TODO: Pass version.TransportAPI instead of relying upon the type URL - v2 := r.GetTypeUrl() == version.V2RouteConfigURL - u, err := generateRDSUpdateFromRouteConfiguration(rc, logger, v2) - if err != nil { - return rc.GetName(), RouteConfigUpdate{}, err - } - u.Raw = r - return rc.GetName(), u, nil -} - -// generateRDSUpdateFromRouteConfiguration checks if the provided -// RouteConfiguration meets the expected criteria. If so, it returns a -// RouteConfigUpdate with nil error. -// -// A RouteConfiguration resource is considered valid when only if it contains a -// VirtualHost whose domain field matches the server name from the URI passed -// to the gRPC channel, and it contains a clusterName or a weighted cluster. -// -// The RouteConfiguration includes a list of VirtualHosts, which may have zero -// or more elements. We are interested in the element whose domains field -// matches the server name specified in the "xds:" URI. The only field in the -// VirtualHost proto that the we are interested in is the list of routes. 
We -// only look at the last route in the list (the default route), whose match -// field must be empty and whose route field must be set. Inside that route -// message, the cluster field will contain the clusterName or weighted clusters -// we are looking for. -func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration, logger *grpclog.PrefixLogger, v2 bool) (RouteConfigUpdate, error) { - var vhs []*VirtualHost - for _, vh := range rc.GetVirtualHosts() { - routes, err := routesProtoToSlice(vh.Routes, logger, v2) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) - } - vhOut := &VirtualHost{ - Domains: vh.GetDomains(), - Routes: routes, - } - if !v2 { - cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) - if err != nil { - return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) - } - vhOut.HTTPFilterConfigOverride = cfgs - } - vhs = append(vhs, vhOut) - } - return RouteConfigUpdate{VirtualHosts: vhs}, nil -} - -func routesProtoToSlice(routes []*v3routepb.Route, logger *grpclog.PrefixLogger, v2 bool) ([]*Route, error) { - var routesRet []*Route - - for _, r := range routes { - match := r.GetMatch() - if match == nil { - return nil, fmt.Errorf("route %+v doesn't have a match", r) - } - - if len(match.GetQueryParameters()) != 0 { - // Ignore route with query parameters. 
- logger.Warningf("route %+v has query parameter matchers, the route will be ignored", r) - continue - } - - pathSp := match.GetPathSpecifier() - if pathSp == nil { - return nil, fmt.Errorf("route %+v doesn't have a path specifier", r) - } - - var route Route - switch pt := pathSp.(type) { - case *v3routepb.RouteMatch_Prefix: - route.Prefix = &pt.Prefix - case *v3routepb.RouteMatch_Path: - route.Path = &pt.Path - case *v3routepb.RouteMatch_SafeRegex: - route.Regex = &pt.SafeRegex.Regex - default: - return nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) - } - - if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil { - route.CaseInsensitive = !caseSensitive.Value - } - - for _, h := range match.GetHeaders() { - var header HeaderMatcher - switch ht := h.GetHeaderMatchSpecifier().(type) { - case *v3routepb.HeaderMatcher_ExactMatch: - header.ExactMatch = &ht.ExactMatch - case *v3routepb.HeaderMatcher_SafeRegexMatch: - header.RegexMatch = &ht.SafeRegexMatch.Regex - case *v3routepb.HeaderMatcher_RangeMatch: - header.RangeMatch = &Int64Range{ - Start: ht.RangeMatch.Start, - End: ht.RangeMatch.End, - } - case *v3routepb.HeaderMatcher_PresentMatch: - header.PresentMatch = &ht.PresentMatch - case *v3routepb.HeaderMatcher_PrefixMatch: - header.PrefixMatch = &ht.PrefixMatch - case *v3routepb.HeaderMatcher_SuffixMatch: - header.SuffixMatch = &ht.SuffixMatch - default: - return nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) - } - header.Name = h.GetName() - invert := h.GetInvertMatch() - header.InvertMatch = &invert - route.Headers = append(route.Headers, &header) - } - - if fr := match.GetRuntimeFraction(); fr != nil { - d := fr.GetDefaultValue() - n := d.GetNumerator() - switch d.GetDenominator() { - case v3typepb.FractionalPercent_HUNDRED: - n *= 10000 - case v3typepb.FractionalPercent_TEN_THOUSAND: - n *= 100 - case v3typepb.FractionalPercent_MILLION: - } - route.Fraction = &n - } - - route.WeightedClusters 
= make(map[string]WeightedCluster) - action := r.GetRoute() - switch a := action.GetClusterSpecifier().(type) { - case *v3routepb.RouteAction_Cluster: - route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} - case *v3routepb.RouteAction_WeightedClusters: - wcs := a.WeightedClusters - var totalWeight uint32 - for _, c := range wcs.Clusters { - w := c.GetWeight().GetValue() - if w == 0 { - continue - } - wc := WeightedCluster{Weight: w} - if !v2 { - cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) - if err != nil { - return nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) - } - wc.HTTPFilterConfigOverride = cfgs - } - route.WeightedClusters[c.GetName()] = wc - totalWeight += w - } - if totalWeight != wcs.GetTotalWeight().GetValue() { - return nil, fmt.Errorf("route %+v, action %+v, weights of clusters do not add up to total total weight, got: %v, want %v", r, a, wcs.GetTotalWeight().GetValue(), totalWeight) - } - if totalWeight == 0 { - return nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) - } - case *v3routepb.RouteAction_ClusterHeader: - continue - } - - msd := action.GetMaxStreamDuration() - // Prefer grpc_timeout_header_max, if set. - dur := msd.GetGrpcTimeoutHeaderMax() - if dur == nil { - dur = msd.GetMaxStreamDuration() - } - if dur != nil { - d := dur.AsDuration() - route.MaxStreamDuration = &d - } - - if !v2 { - cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) - if err != nil { - return nil, fmt.Errorf("route %+v: %v", r, err) - } - route.HTTPFilterConfigOverride = cfgs - } - routesRet = append(routesRet, &route) - } - return routesRet, nil -} - -// UnmarshalCluster processes resources received in an CDS response, validates -// them, and transforms them into a native struct which contains only fields we -// are interested in. 
-func UnmarshalCluster(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]ClusterUpdate, UpdateMetadata, error) { - update := make(map[string]ClusterUpdate) - md, err := processAllResources(version, resources, logger, update) - return update, md, err -} - -func unmarshalClusterResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, ClusterUpdate, error) { - if !IsClusterResource(r.GetTypeUrl()) { - return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) - } - - cluster := &v3clusterpb.Cluster{} - if err := proto.Unmarshal(r.GetValue(), cluster); err != nil { - return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cluster.GetName(), cluster, cluster) - - cu, err := validateCluster(cluster) - if err != nil { - return cluster.GetName(), ClusterUpdate{}, err - } - cu.Raw = r - // If the Cluster message in the CDS response did not contain a - // serviceName, we will just use the clusterName for EDS. - if cu.ServiceName == "" { - cu.ServiceName = cluster.GetName() - } - return cluster.GetName(), cu, nil -} - -func validateCluster(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { - emptyUpdate := ClusterUpdate{ServiceName: "", EnableLRS: false} - switch { - case cluster.GetType() != v3clusterpb.Cluster_EDS: - return emptyUpdate, fmt.Errorf("unexpected cluster type %v in response: %+v", cluster.GetType(), cluster) - case cluster.GetEdsClusterConfig().GetEdsConfig().GetAds() == nil: - return emptyUpdate, fmt.Errorf("unexpected edsConfig in response: %+v", cluster) - case cluster.GetLbPolicy() != v3clusterpb.Cluster_ROUND_ROBIN: - return emptyUpdate, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) - } - - // Process security configuration received from the control plane iff the - // corresponding environment variable is set. 
- var sc *SecurityConfig - if env.ClientSideSecuritySupport { - var err error - if sc, err = securityConfigFromCluster(cluster); err != nil { - return emptyUpdate, err - } - } - - return ClusterUpdate{ - ServiceName: cluster.GetEdsClusterConfig().GetServiceName(), - EnableLRS: cluster.GetLrsServer().GetSelf() != nil, - SecurityCfg: sc, - MaxRequests: circuitBreakersFromCluster(cluster), - }, nil -} - -// securityConfigFromCluster extracts the relevant security configuration from -// the received Cluster resource. -func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { - // The Cluster resource contains a `transport_socket` field, which contains - // a oneof `typed_config` field of type `protobuf.Any`. The any proto - // contains a marshaled representation of an `UpstreamTlsContext` message. - ts := cluster.GetTransportSocket() - if ts == nil { - return nil, nil - } - if name := ts.GetName(); name != transportSocketName { - return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) - } - any := ts.GetTypedConfig() - if any == nil || any.TypeUrl != version.V3UpstreamTLSContextURL { - return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl) - } - upstreamCtx := &v3tlspb.UpstreamTlsContext{} - if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil { - return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err) - } - if upstreamCtx.GetCommonTlsContext() == nil { - return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext") - } - - sc, err := securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext()) - if err != nil { - return nil, err - } - if sc.RootInstanceName == "" { - return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name") - } - return sc, nil -} - -// common is expected to be not nil. 
-func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext) (*SecurityConfig, error) { - // The `CommonTlsContext` contains a - // `tls_certificate_certificate_provider_instance` field of type - // `CertificateProviderInstance`, which contains the provider instance name - // and the certificate name to fetch identity certs. - sc := &SecurityConfig{} - if identity := common.GetTlsCertificateCertificateProviderInstance(); identity != nil { - sc.IdentityInstanceName = identity.GetInstanceName() - sc.IdentityCertName = identity.GetCertificateName() - } - - // The `CommonTlsContext` contains a `validation_context_type` field which - // is a oneof. We can get the values that we are interested in from two of - // those possible values: - // - combined validation context: - // - contains a default validation context which holds the list of - // matchers for accepted SANs. - // - contains certificate provider instance configuration - // - certificate provider instance configuration - // - in this case, we do not get a list of accepted SANs. 
- switch t := common.GetValidationContextType().(type) { - case *v3tlspb.CommonTlsContext_CombinedValidationContext: - combined := common.GetCombinedValidationContext() - var matchers []xds.StringMatcher - if def := combined.GetDefaultValidationContext(); def != nil { - for _, m := range def.GetMatchSubjectAltNames() { - matcher, err := xds.StringMatcherFromProto(m) - if err != nil { - return nil, err - } - matchers = append(matchers, matcher) - } - } - sc.SubjectAltNameMatchers = matchers - if pi := combined.GetValidationContextCertificateProviderInstance(); pi != nil { - sc.RootInstanceName = pi.GetInstanceName() - sc.RootCertName = pi.GetCertificateName() - } - case *v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance: - pi := common.GetValidationContextCertificateProviderInstance() - sc.RootInstanceName = pi.GetInstanceName() - sc.RootCertName = pi.GetCertificateName() - case nil: - // It is valid for the validation context to be nil on the server side. - default: - return nil, fmt.Errorf("validation context contains unexpected type: %T", t) - } - return sc, nil -} - -// circuitBreakersFromCluster extracts the circuit breakers configuration from -// the received cluster resource. Returns nil if no CircuitBreakers or no -// Thresholds in CircuitBreakers. -func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { - if !env.CircuitBreakingSupport { - return nil - } - for _, threshold := range cluster.GetCircuitBreakers().GetThresholds() { - if threshold.GetPriority() != v3corepb.RoutingPriority_DEFAULT { - continue - } - maxRequestsPb := threshold.GetMaxRequests() - if maxRequestsPb == nil { - return nil - } - maxRequests := maxRequestsPb.GetValue() - return &maxRequests - } - return nil -} - -// UnmarshalEndpoints processes resources received in an EDS response, -// validates them, and transforms them into a native struct which contains only -// fields we are interested in. 
-func UnmarshalEndpoints(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger) (map[string]EndpointsUpdate, UpdateMetadata, error) { - update := make(map[string]EndpointsUpdate) - md, err := processAllResources(version, resources, logger, update) - return update, md, err -} - -func unmarshalEndpointsResource(r *anypb.Any, logger *grpclog.PrefixLogger) (string, EndpointsUpdate, error) { - if !IsEndpointsResource(r.GetTypeUrl()) { - return "", EndpointsUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) - } - - cla := &v3endpointpb.ClusterLoadAssignment{} - if err := proto.Unmarshal(r.GetValue(), cla); err != nil { - return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) - } - logger.Infof("Resource with name: %v, type: %T, contains: %v", cla.GetClusterName(), cla, cla) - - u, err := parseEDSRespProto(cla) - if err != nil { - return cla.GetClusterName(), EndpointsUpdate{}, err - } - u.Raw = r - return cla.GetClusterName(), u, nil -} - -func parseAddress(socketAddress *v3corepb.SocketAddress) string { - return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) -} - -func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig { - percentage := dropPolicy.GetDropPercentage() - var ( - numerator = percentage.GetNumerator() - denominator uint32 - ) - switch percentage.GetDenominator() { - case v3typepb.FractionalPercent_HUNDRED: - denominator = 100 - case v3typepb.FractionalPercent_TEN_THOUSAND: - denominator = 10000 - case v3typepb.FractionalPercent_MILLION: - denominator = 1000000 - } - return OverloadDropConfig{ - Category: dropPolicy.GetCategory(), - Numerator: numerator, - Denominator: denominator, - } -} - -func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint) []Endpoint { - endpoints := make([]Endpoint, 0, len(lbEndpoints)) - for _, lbEndpoint := range lbEndpoints { - endpoints = append(endpoints, Endpoint{ - 
HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), - Address: parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()), - Weight: lbEndpoint.GetLoadBalancingWeight().GetValue(), - }) - } - return endpoints -} - -func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { - ret := EndpointsUpdate{} - for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { - ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) - } - priorities := make(map[uint32]struct{}) - for _, locality := range m.Endpoints { - l := locality.GetLocality() - if l == nil { - return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) - } - lid := internal.LocalityID{ - Region: l.Region, - Zone: l.Zone, - SubZone: l.SubZone, - } - priority := locality.GetPriority() - priorities[priority] = struct{}{} - ret.Localities = append(ret.Localities, Locality{ - ID: lid, - Endpoints: parseEndpoints(locality.GetLbEndpoints()), - Weight: locality.GetLoadBalancingWeight().GetValue(), - Priority: priority, - }) - } - for i := 0; i < len(priorities); i++ { - if _, ok := priorities[uint32(i)]; !ok { - return EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) - } - } - return ret, nil -} - -// processAllResources unmarshals and validates the resources, populates the -// provided ret (a map), and returns metadata and error. -// -// The type of the resource is determined by the type of ret. E.g. -// map[string]ListenerUpdate means this is for LDS. 
-func processAllResources(version string, resources []*anypb.Any, logger *grpclog.PrefixLogger, ret interface{}) (UpdateMetadata, error) { - timestamp := time.Now() - md := UpdateMetadata{ - Version: version, - Timestamp: timestamp, - } - var topLevelErrors []error - perResourceErrors := make(map[string]error) - - for _, r := range resources { - switch ret2 := ret.(type) { - case map[string]ListenerUpdate: - name, update, err := unmarshalListenerResource(r, logger) - if err == nil { - ret2[name] = update - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = ListenerUpdate{} - case map[string]RouteConfigUpdate: - name, update, err := unmarshalRouteConfigResource(r, logger) - if err == nil { - ret2[name] = update - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = RouteConfigUpdate{} - case map[string]ClusterUpdate: - name, update, err := unmarshalClusterResource(r, logger) - if err == nil { - ret2[name] = update - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. - ret2[name] = ClusterUpdate{} - case map[string]EndpointsUpdate: - name, update, err := unmarshalEndpointsResource(r, logger) - if err == nil { - ret2[name] = update - continue - } - if name == "" { - topLevelErrors = append(topLevelErrors, err) - continue - } - perResourceErrors[name] = err - // Add place holder in the map so we know this resource name was in - // the response. 
- ret2[name] = EndpointsUpdate{} - } - } - - if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { - md.Status = ServiceStatusACKed - return md, nil - } - - var typeStr string - switch ret.(type) { - case map[string]ListenerUpdate: - typeStr = "LDS" - case map[string]RouteConfigUpdate: - typeStr = "RDS" - case map[string]ClusterUpdate: - typeStr = "CDS" - case map[string]EndpointsUpdate: - typeStr = "EDS" - } - - md.Status = ServiceStatusNACKed - errRet := combineErrors(typeStr, topLevelErrors, perResourceErrors) - md.ErrState = &UpdateErrorMetadata{ - Version: version, - Err: errRet, - Timestamp: timestamp, - } - return md, errRet -} - -func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { - var errStrB strings.Builder - errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) - if len(topLevelErrors) > 0 { - errStrB.WriteString("top level errors: ") - for i, err := range topLevelErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - errStrB.WriteString(err.Error()) - } - } - if len(perResourceErrors) > 0 { - var i int - for name, err := range perResourceErrors { - if i != 0 { - errStrB.WriteString(";\n") - } - i++ - errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) - } - } - return errors.New(errStrB.String()) -} diff --git a/xds/internal/clusterspecifier/cluster_specifier.go b/xds/internal/clusterspecifier/cluster_specifier.go new file mode 100644 index 000000000000..b95a101116ed --- /dev/null +++ b/xds/internal/clusterspecifier/cluster_specifier.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package clusterspecifier contains the ClusterSpecifier interface and a registry for +// storing and retrieving their implementations. +package clusterspecifier + +import ( + "github.com/golang/protobuf/proto" +) + +// BalancerConfig is the Go Native JSON representation of a balancer +// configuration. +type BalancerConfig []map[string]interface{} + +// ClusterSpecifier defines the parsing functionality of a Cluster Specifier. +type ClusterSpecifier interface { + // TypeURLs are the proto message types supported by this + // ClusterSpecifierPlugin. A ClusterSpecifierPlugin will be registered by + // each of its supported message types. + TypeURLs() []string + // ParseClusterSpecifierConfig parses the provided configuration + // proto.Message from the top level RDS configuration. The resulting + // BalancerConfig will be used as configuration for a child LB Policy of the + // Cluster Manager LB Policy. A nil BalancerConfig is invalid. + ParseClusterSpecifierConfig(proto.Message) (BalancerConfig, error) +} + +var ( + // m is a map from scheme to filter. + m = make(map[string]ClusterSpecifier) +) + +// Register registers the ClusterSpecifierPlugin to the ClusterSpecifier map. +// cs.TypeURLs() will be used as the types for this ClusterSpecifierPlugin. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple cluster specifier +// plugins are registered with the same type URL, the one registered last will +// take effect. 
+func Register(cs ClusterSpecifier) { + for _, u := range cs.TypeURLs() { + m[u] = cs + } +} + +// Get returns the ClusterSpecifier registered with typeURL. +// +// If no cluster specifier is registered with typeURL, nil will be returned. +func Get(typeURL string) ClusterSpecifier { + return m[typeURL] +} + +// UnregisterForTesting unregisters the ClusterSpecifier for testing purposes. +func UnregisterForTesting(typeURL string) { + delete(m, typeURL) +} diff --git a/xds/internal/clusterspecifier/rls/rls.go b/xds/internal/clusterspecifier/rls/rls.go new file mode 100644 index 000000000000..4c39e85739db --- /dev/null +++ b/xds/internal/clusterspecifier/rls/rls.go @@ -0,0 +1,109 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rls implements the RLS cluster specifier plugin. 
+package rls + +import ( + "encoding/json" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/anypb" +) + +func init() { + if envconfig.XDSRLS { + clusterspecifier.Register(rls{}) + } + + // TODO: Remove these once the RLS env var is removed. + internal.RegisterRLSClusterSpecifierPluginForTesting = func() { + clusterspecifier.Register(rls{}) + } + internal.UnregisterRLSClusterSpecifierPluginForTesting = func() { + for _, typeURL := range rls.TypeURLs(rls{}) { + clusterspecifier.UnregisterForTesting(typeURL) + } + } +} + +type rls struct{} + +func (rls) TypeURLs() []string { + return []string{"type.googleapis.com/grpc.lookup.v1.RouteLookupClusterSpecifier"} +} + +// lbConfigJSON is the RLS LB Policies configuration in JSON format. +// RouteLookupConfig will be a raw JSON string from the passed in proto +// configuration, and the other fields will be hardcoded. 
+type lbConfigJSON struct { + RouteLookupConfig json.RawMessage `json:"routeLookupConfig"` + ChildPolicy []map[string]json.RawMessage `json:"childPolicy"` + ChildPolicyConfigTargetFieldName string `json:"childPolicyConfigTargetFieldName"` +} + +func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.BalancerConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("rls_csp: nil configuration message provided") + } + any, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rls_csp: error parsing config %v: unknown type %T", cfg, cfg) + } + rlcs := new(rlspb.RouteLookupClusterSpecifier) + + if err := ptypes.UnmarshalAny(any, rlcs); err != nil { + return nil, fmt.Errorf("rls_csp: error parsing config %v: %v", cfg, err) + } + rlcJSON, err := protojson.Marshal(rlcs.GetRouteLookupConfig()) + if err != nil { + return nil, fmt.Errorf("rls_csp: error marshaling route lookup config: %v: %v", rlcs.GetRouteLookupConfig(), err) + } + lbCfgJSON := &lbConfigJSON{ + RouteLookupConfig: rlcJSON, // "JSON form of RouteLookupClusterSpecifier.config" - RLS in xDS Design Doc + ChildPolicy: []map[string]json.RawMessage{ + { + "cds_experimental": json.RawMessage("{}"), + }, + }, + ChildPolicyConfigTargetFieldName: "cluster", + } + + rawJSON, err := json.Marshal(lbCfgJSON) + if err != nil { + return nil, fmt.Errorf("rls_csp: error marshaling load balancing config %v: %v", lbCfgJSON, err) + } + + rlsBB := balancer.Get(internal.RLSLoadBalancingPolicyName) + if rlsBB == nil { + return nil, fmt.Errorf("RLS LB policy not registered") + } + if _, err = rlsBB.(balancer.ConfigParser).ParseConfig(rawJSON); err != nil { + return nil, fmt.Errorf("rls_csp: validation error from rls lb policy parsing: %v", err) + } + + return clusterspecifier.BalancerConfig{{internal.RLSLoadBalancingPolicyName: lbCfgJSON}}, nil +} diff --git a/xds/internal/clusterspecifier/rls/rls_test.go b/xds/internal/clusterspecifier/rls/rls_test.go new file mode 100644 index 000000000000..9e0a10b648e6 
--- /dev/null +++ b/xds/internal/clusterspecifier/rls/rls_test.go @@ -0,0 +1,169 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package rls + +import ( + "encoding/json" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/grpctest" + rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/types/known/durationpb" + + _ "google.golang.org/grpc/balancer/rls" // Register the RLS LB policy. + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS LB policy. +) + +func init() { + clusterspecifier.Register(rls{}) +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +// TestParseClusterSpecifierConfig tests the parsing functionality of the RLS +// Cluster Specifier Plugin. 
+func (s) TestParseClusterSpecifierConfig(t *testing.T) { + tests := []struct { + name string + rlcs proto.Message + wantConfig clusterspecifier.BalancerConfig + wantErr bool + }{ + { + name: "invalid-rls-cluster-specifier", + rlcs: rlsClusterSpecifierConfigError, + wantErr: true, + }, + { + name: "valid-rls-cluster-specifier", + rlcs: rlsClusterSpecifierConfigWithoutTransformations, + wantConfig: configWithoutTransformationsWant, + }, + } + for _, test := range tests { + cs := clusterspecifier.Get("type.googleapis.com/grpc.lookup.v1.RouteLookupClusterSpecifier") + if cs == nil { + t.Fatal("Error getting cluster specifier") + } + lbCfg, err := cs.ParseClusterSpecifierConfig(test.rlcs) + + if (err != nil) != test.wantErr { + t.Fatalf("ParseClusterSpecifierConfig(%+v) returned err: %v, wantErr: %v", test.rlcs, err, test.wantErr) + } + if test.wantErr { // Successfully received an error. + return + } + // Marshal and then unmarshal into interface{} to get rid of + // nondeterministic protojson Marshaling. + lbCfgJSON, err := json.Marshal(lbCfg) + if err != nil { + t.Fatalf("json.Marshal(%+v) returned err %v", lbCfg, err) + } + var got interface{} + err = json.Unmarshal(lbCfgJSON, got) + if err != nil { + t.Fatalf("json.Unmarshal(%+v) returned err %v", lbCfgJSON, err) + } + wantCfgJSON, err := json.Marshal(test.wantConfig) + if err != nil { + t.Fatalf("json.Marshal(%+v) returned err %v", test.wantConfig, err) + } + var want interface{} + err = json.Unmarshal(wantCfgJSON, want) + if err != nil { + t.Fatalf("json.Unmarshal(%+v) returned err %v", lbCfgJSON, err) + } + if diff := cmp.Diff(want, got, cmpopts.EquateEmpty()); diff != "" { + t.Fatalf("ParseClusterSpecifierConfig(%+v) returned expected, diff (-want +got) %v", test.rlcs, diff) + } + } +} + +// This will error because the required match field is set in grpc key builder. 
+var rlsClusterSpecifierConfigError = testutils.MarshalAny(&rlspb.RouteLookupClusterSpecifier{ + RouteLookupConfig: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + { + Service: "service", + Method: "method", + }, + }, + Headers: []*rlspb.NameMatcher{ + { + Key: "k1", + RequiredMatch: true, + Names: []string{"v1"}, + }, + }, + }, + }, + }, +}) + +// Corresponds to the rls unit test case in +// balancer/rls/internal/config_test.go. +var rlsClusterSpecifierConfigWithoutTransformations = testutils.MarshalAny(&rlspb.RouteLookupClusterSpecifier{ + RouteLookupConfig: &rlspb.RouteLookupConfig{ + GrpcKeybuilders: []*rlspb.GrpcKeyBuilder{ + { + Names: []*rlspb.GrpcKeyBuilder_Name{ + { + Service: "service", + Method: "method", + }, + }, + Headers: []*rlspb.NameMatcher{ + { + Key: "k1", + Names: []string{"v1"}, + }, + }, + }, + }, + LookupService: "target", + LookupServiceTimeout: &durationpb.Duration{Seconds: 100}, + MaxAge: &durationpb.Duration{Seconds: 60}, + StaleAge: &durationpb.Duration{Seconds: 50}, + CacheSizeBytes: 1000, + DefaultTarget: "passthrough:///default", + }, +}) + +var configWithoutTransformationsWant = clusterspecifier.BalancerConfig{{"rls_experimental": &lbConfigJSON{ + RouteLookupConfig: []byte(`{"grpcKeybuilders":[{"names":[{"service":"service","method":"method"}],"headers":[{"key":"k1","names":["v1"]}]}],"lookupService":"target","lookupServiceTimeout":"100s","maxAge":"60s","staleAge":"50s","cacheSizeBytes":"1000","defaultTarget":"passthrough:///default"}`), + ChildPolicy: []map[string]json.RawMessage{ + { + "cds_experimental": []byte(`{}`), + }, + }, + ChildPolicyConfigTargetFieldName: "cluster", +}}} diff --git a/xds/internal/httpfilter/fault/fault.go b/xds/internal/httpfilter/fault/fault.go index 639a08db8e3c..725b50a76a83 100644 --- a/xds/internal/httpfilter/fault/fault.go +++ b/xds/internal/httpfilter/fault/fault.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/codes" 
"google.golang.org/grpc/internal/grpcrand" iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/internal/xds/env" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/httpfilter" @@ -63,9 +62,7 @@ var statusMap = map[int]codes.Code{ } func init() { - if env.FaultInjectionSupport { - httpfilter.Register(builder{}) - } + httpfilter.Register(builder{}) } type builder struct { @@ -104,6 +101,10 @@ func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.Fil return parseConfig(override) } +func (builder) IsTerminal() bool { + return false +} + var _ httpfilter.ClientInterceptorBuilder = builder{} func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ClientInterceptor, error) { @@ -125,7 +126,12 @@ func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (ir } } - return &interceptor{config: c.config}, nil + icfg := c.config + if (icfg.GetMaxActiveFaults() != nil && icfg.GetMaxActiveFaults().GetValue() == 0) || + (icfg.GetDelay() == nil && icfg.GetAbort() == nil) { + return nil, nil + } + return &interceptor{config: icfg}, nil } type interceptor struct { diff --git a/xds/internal/httpfilter/fault/fault_test.go b/xds/internal/httpfilter/fault/fault_test.go index 61100e8c44f8..20dd0a2c95cd 100644 --- a/xds/internal/httpfilter/fault/fault_test.go +++ b/xds/internal/httpfilter/fault/fault_test.go @@ -1,3 +1,4 @@ +//go:build !386 // +build !386 /* @@ -37,13 +38,11 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpctest" - "google.golang.org/grpc/internal/xds" - "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" - 
"google.golang.org/grpc/xds/internal/httpfilter" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/e2e" "google.golang.org/protobuf/types/known/wrapperspb" v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" @@ -51,13 +50,15 @@ import ( fpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3" v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" tpb "github.com/envoyproxy/go-control-plane/envoy/type/v3" - testpb "google.golang.org/grpc/test/grpc_testing" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" - _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/client/v3" // Register the v3 xDS API client. - _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. ) +const defaultTestTimeout = 10 * time.Second + type s struct { grpctest.Tester } @@ -67,14 +68,14 @@ func Test(t *testing.T) { } type testService struct { - testpb.TestServiceServer + testgrpc.TestServiceServer } func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { return &testpb.Empty{}, nil } -func (*testService) FullDuplexCall(stream testpb.TestService_FullDuplexCallServer) error { +func (*testService) FullDuplexCall(stream testgrpc.TestService_FullDuplexCallServer) error { // End RPC after client does a CloseSend. 
for { if _, err := stream.Recv(); err == io.EOF { @@ -91,22 +92,21 @@ func (*testService) FullDuplexCall(stream testpb.TestService_FullDuplexCallServe // - create a local TCP listener and start serving on it // // Returns the following: -// - the management server: tests use this to configure resources -// - nodeID expected by the management server: this is set in the Node proto -// sent by the xdsClient for queries. -// - the port the server is listening on -// - cleanup function to be invoked by the tests when done +// - the management server: tests use this to configure resources +// - nodeID expected by the management server: this is set in the Node proto +// sent by the xdsClient for queries. +// - the port the server is listening on +// - cleanup function to be invoked by the tests when done func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { // Spin up a xDS management server on a local port. nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer() + fs, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) if err != nil { t.Fatal(err) } // Create a bootstrap file in a temporary directory. - bootstrapCleanup, err := xds.SetupBootstrapFile(xds.BootstrapOptions{ - Version: xds.TransportV3, + bootstrapCleanup, err := bootstrap.CreateFile(bootstrap.Options{ NodeID: nodeID, ServerURI: fs.Address, ServerListenerResourceNameTemplate: "grpc/server", @@ -117,7 +117,7 @@ func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { // Initialize a gRPC server and register the stubServer on it. server := grpc.NewServer() - testpb.RegisterTestServiceServer(server, &testService{}) + testgrpc.RegisterTestServiceServer(server, &testService{}) // Create a local listener and pass it to Serve(). 
lis, err := testutils.LocalTCPListener() @@ -138,13 +138,6 @@ func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { } } -func init() { - env.FaultInjectionSupport = true - // Manually register to avoid a race between this init and the one that - // check the env var to register the fault injection filter. - httpfilter.Register(builder{}) -} - func (s) TestFaultInjection_Unary(t *testing.T) { type subcase struct { name string @@ -160,6 +153,28 @@ func (s) TestFaultInjection_Unary(t *testing.T) { randOutInc int want []subcase }{{ + name: "max faults zero", + cfgs: []*fpb.HTTPFault{{ + MaxActiveFaults: wrapperspb.UInt32(0), + Abort: &fpb.FaultAbort{ + Percentage: &tpb.FractionalPercent{Numerator: 100, Denominator: tpb.FractionalPercent_HUNDRED}, + ErrorType: &fpb.FaultAbort_GrpcStatus{GrpcStatus: uint32(codes.Aborted)}, + }, + }}, + randOutInc: 5, + want: []subcase{{ + code: codes.OK, + repeat: 25, + }}, + }, { + name: "no abort or delay", + cfgs: []*fpb.HTTPFault{{}}, + randOutInc: 5, + want: []subcase{{ + code: codes.OK, + repeat: 25, + }}, + }, { name: "abort always", cfgs: []*fpb.HTTPFault{{ Abort: &fpb.FaultAbort{ @@ -464,15 +479,8 @@ func (s) TestFaultInjection_Unary(t *testing.T) { fs, nodeID, port, cleanup := clientSetup(t) defer cleanup() - resources := e2e.DefaultClientResources("myservice", nodeID, "localhost", port) - hcm := new(v3httppb.HttpConnectionManager) - err := ptypes.UnmarshalAny(resources.Listeners[0].GetApiListener().GetApiListener(), hcm) - if err != nil { - t.Fatal(err) - } - routerFilter := hcm.HttpFilters[len(hcm.HttpFilters)-1] - for _, tc := range testCases { + for tcNum, tc := range testCases { t.Run(tc.name, func(t *testing.T) { defer func() { randIntn = grpcrand.Intn; newTimer = time.NewTimer }() var intnCalls []int @@ -488,51 +496,63 @@ func (s) TestFaultInjection_Unary(t *testing.T) { return time.NewTimer(0) } + serviceName := fmt.Sprintf("myservice%d", tcNum) + resources := 
e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: serviceName, + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) + hcm := new(v3httppb.HttpConnectionManager) + err := ptypes.UnmarshalAny(resources.Listeners[0].GetApiListener().GetApiListener(), hcm) + if err != nil { + t.Fatal(err) + } + routerFilter := hcm.HttpFilters[len(hcm.HttpFilters)-1] + hcm.HttpFilters = nil for i, cfg := range tc.cfgs { hcm.HttpFilters = append(hcm.HttpFilters, e2e.HTTPFilter(fmt.Sprintf("fault%d", i), cfg)) } hcm.HttpFilters = append(hcm.HttpFilters, routerFilter) - hcmAny, err := ptypes.MarshalAny(hcm) - if err != nil { - t.Fatal(err) - } + hcmAny := testutils.MarshalAny(hcm) resources.Listeners[0].ApiListener.ApiListener = hcmAny resources.Listeners[0].FilterChains[0].Filters[0].ConfigType = &v3listenerpb.Filter_TypedConfig{TypedConfig: hcmAny} - if err := fs.Update(resources); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := fs.Update(ctx, resources); err != nil { t.Fatal(err) } // Create a ClientConn and run the test case. 
- cc, err := grpc.Dial("xds:///myservice", grpc.WithTransportCredentials(insecure.NewCredentials())) + cc, err := grpc.Dial("xds:///"+serviceName, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { t.Fatalf("failed to dial local test server: %v", err) } defer cc.Close() - client := testpb.NewTestServiceClient(cc) + client := testgrpc.NewTestServiceClient(cc) count := 0 for _, want := range tc.want { - t.Run(want.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - if want.repeat == 0 { - t.Fatalf("invalid repeat count") + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + if want.repeat == 0 { + t.Fatalf("invalid repeat count") + } + for n := 0; n < want.repeat; n++ { + intnCalls = nil + newTimerCalls = nil + ctx = metadata.NewOutgoingContext(ctx, want.md) + _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) + t.Logf("%v: RPC %d: err: %v, intnCalls: %v, newTimerCalls: %v", want.name, count, err, intnCalls, newTimerCalls) + if status.Code(err) != want.code || !reflect.DeepEqual(intnCalls, want.randIn) || !reflect.DeepEqual(newTimerCalls, want.delays) { + t.Fatalf("WANTED code: %v, intnCalls: %v, newTimerCalls: %v", want.code, want.randIn, want.delays) } - for n := 0; n < want.repeat; n++ { - intnCalls = nil - newTimerCalls = nil - ctx = metadata.NewOutgoingContext(ctx, want.md) - _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)) - t.Logf("RPC %d: err: %v, intnCalls: %v, newTimerCalls: %v", count, err, intnCalls, newTimerCalls) - if status.Code(err) != want.code || !reflect.DeepEqual(intnCalls, want.randIn) || !reflect.DeepEqual(newTimerCalls, want.delays) { - t.Errorf("WANTED code: %v, intnCalls: %v, newTimerCalls: %v", want.code, want.randIn, want.delays) - } - randOut += tc.randOutInc - count++ - } - }) + randOut += tc.randOutInc + count++ + } } }) } @@ -541,7 +561,13 @@ func (s) 
TestFaultInjection_Unary(t *testing.T) { func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { fs, nodeID, port, cleanup := clientSetup(t) defer cleanup() - resources := e2e.DefaultClientResources("myservice", nodeID, "localhost", port) + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: "myservice", + NodeID: nodeID, + Host: "localhost", + Port: port, + SecLevel: e2e.SecurityLevelNone, + }) hcm := new(v3httppb.HttpConnectionManager) err := ptypes.UnmarshalAny(resources.Listeners[0].GetApiListener().GetApiListener(), hcm) if err != nil { @@ -565,14 +591,13 @@ func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { }, })}, hcm.HttpFilters...) - hcmAny, err := ptypes.MarshalAny(hcm) - if err != nil { - t.Fatal(err) - } + hcmAny := testutils.MarshalAny(hcm) resources.Listeners[0].ApiListener.ApiListener = hcmAny resources.Listeners[0].FilterChains[0].Filters[0].ConfigType = &v3listenerpb.Filter_TypedConfig{TypedConfig: hcmAny} - if err := fs.Update(resources); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := fs.Update(ctx, resources); err != nil { t.Fatal(err) } @@ -583,11 +608,9 @@ func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { } defer cc.Close() - client := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() + client := testgrpc.NewTestServiceClient(cc) - streams := make(chan testpb.TestService_FullDuplexCallClient) + streams := make(chan testgrpc.TestService_FullDuplexCallClient, 5) // startStream() is called 5 times startStream := func() { str, err := client.FullDuplexCall(ctx) if err != nil { @@ -599,7 +622,7 @@ func (s) TestFaultInjection_MaxActiveFaults(t *testing.T) { str := <-streams str.CloseSend() if _, err := str.Recv(); err != io.EOF { - t.Fatal("stream error:", err) + t.Error("stream error:", err) } } releaseStream := func() { diff --git 
a/xds/internal/httpfilter/httpfilter.go b/xds/internal/httpfilter/httpfilter.go index 6650241fab71..dd9a278389b5 100644 --- a/xds/internal/httpfilter/httpfilter.go +++ b/xds/internal/httpfilter/httpfilter.go @@ -40,16 +40,20 @@ type Filter interface { // will be registered by each of its supported message types. TypeURLs() []string // ParseFilterConfig parses the provided configuration proto.Message from - // the LDS configuration of this filter. This may be an anypb.Any or a - // udpa.type.v1.TypedStruct for filters that do not accept a custom type. - // The resulting FilterConfig will later be passed to Build. + // the LDS configuration of this filter. This may be an anypb.Any, a + // udpa.type.v1.TypedStruct, or an xds.type.v3.TypedStruct for filters that + // do not accept a custom type. The resulting FilterConfig will later be + // passed to Build. ParseFilterConfig(proto.Message) (FilterConfig, error) // ParseFilterConfigOverride parses the provided override configuration // proto.Message from the RDS override configuration of this filter. This - // may be an anypb.Any or a udpa.type.v1.TypedStruct for filters that do - // not accept a custom type. The resulting FilterConfig will later be - // passed to Build. + // may be an anypb.Any, a udpa.type.v1.TypedStruct, or an + // xds.type.v3.TypedStruct for filters that do not accept a custom type. + // The resulting FilterConfig will later be passed to Build. ParseFilterConfigOverride(proto.Message) (FilterConfig, error) + // IsTerminal returns whether this Filter is terminal or not (i.e. it must + // be last filter in the filter chain). + IsTerminal() bool } // ClientInterceptorBuilder constructs a Client Interceptor. If this type is @@ -65,9 +69,6 @@ type ClientInterceptorBuilder interface { // ServerInterceptorBuilder constructs a Server Interceptor. If this type is // implemented by a Filter, it is capable of working on a server. 
-// -// Server side filters are not currently supported, but this interface is -// defined for clarity. type ServerInterceptorBuilder interface { // BuildServerInterceptor uses the FilterConfigs produced above to produce // an HTTP filter interceptor for servers. config will always be non-nil, @@ -94,6 +95,11 @@ func Register(b Filter) { } } +// UnregisterForTesting unregisters the HTTP Filter for testing purposes. +func UnregisterForTesting(typeURL string) { + delete(m, typeURL) +} + // Get returns the HTTPFilter registered with typeURL. // // If no filter is register with typeURL, nil will be returned. diff --git a/xds/internal/httpfilter/rbac/rbac.go b/xds/internal/httpfilter/rbac/rbac.go new file mode 100644 index 000000000000..277fcfc5927a --- /dev/null +++ b/xds/internal/httpfilter/rbac/rbac.go @@ -0,0 +1,218 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package rbac implements the Envoy RBAC HTTP filter. 
+package rbac + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/rbac" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" + + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + rpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" +) + +func init() { + if envconfig.XDSRBAC { + httpfilter.Register(builder{}) + } + + // TODO: Remove these once the RBAC env var is removed. + internal.RegisterRBACHTTPFilterForTesting = func() { + httpfilter.Register(builder{}) + } + internal.UnregisterRBACHTTPFilterForTesting = func() { + for _, typeURL := range builder.TypeURLs(builder{}) { + httpfilter.UnregisterForTesting(typeURL) + } + } +} + +type builder struct { +} + +type config struct { + httpfilter.FilterConfig + chainEngine *rbac.ChainEngine +} + +func (builder) TypeURLs() []string { + return []string{ + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC", + "type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute", + } +} + +// Parsing is the same for the base config and the override config. +func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { + // All the validation logic described in A41. + for _, policy := range rbacCfg.GetRules().GetPolicies() { + // "Policy.condition and Policy.checked_condition must cause a + // validation failure if present." 
- A41 + if policy.Condition != nil { + return nil, errors.New("rbac: Policy.condition is present") + } + if policy.CheckedCondition != nil { + return nil, errors.New("rbac: policy.CheckedCondition is present") + } + + // "It is also a validation failure if Permission or Principal has a + // header matcher for a grpc- prefixed header name or :scheme." - A41 + for _, principal := range policy.Principals { + name := principal.GetHeader().GetName() + if name == ":scheme" || strings.HasPrefix(name, "grpc-") { + return nil, fmt.Errorf("rbac: principal header matcher for %v is :scheme or starts with grpc", name) + } + } + for _, permission := range policy.Permissions { + name := permission.GetHeader().GetName() + if name == ":scheme" || strings.HasPrefix(name, "grpc-") { + return nil, fmt.Errorf("rbac: permission header matcher for %v is :scheme or starts with grpc", name) + } + } + } + + // "Envoy aliases :authority and Host in its header map implementation, so + // they should be treated equivalent for the RBAC matchers; there must be no + // behavior change depending on which of the two header names is used in the + // RBAC policy." - A41. Loop through config's principals and policies, change + // any header matcher with value "host" to :authority", as that is what + // grpc-go shifts both headers to in transport layer. + for _, policy := range rbacCfg.GetRules().GetPolicies() { + for _, principal := range policy.Principals { + if principal.GetHeader().GetName() == "host" { + principal.GetHeader().Name = ":authority" + } + } + for _, permission := range policy.Permissions { + if permission.GetHeader().GetName() == "host" { + permission.GetHeader().Name = ":authority" + } + } + } + + // Two cases where this HTTP Filter is a no op: + // "If absent, no enforcing RBAC policy will be applied" - RBAC + // Documentation for Rules field. + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configurated." 
- A41 + if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG { + return config{}, nil + } + + // TODO(gregorycooke) - change the call chain to here so we have the filter + // name to input here instead of an empty string. It will come from here: + // https://github.com/grpc/grpc-go/blob/eff0942e95d93112921414aee758e619ec86f26f/xds/internal/xdsclient/xdsresource/unmarshal_lds.go#L199 + ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "") + if err != nil { + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configurated." - A41 + if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG { + return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err) + } + } + + return config{chainEngine: ce}, nil +} + +func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("rbac: nil configuration message provided") + } + any, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rbac: error parsing config %v: unknown type %T", cfg, cfg) + } + msg := new(rpb.RBAC) + if err := ptypes.UnmarshalAny(any, msg); err != nil { + return nil, fmt.Errorf("rbac: error parsing config %v: %v", cfg, err) + } + return parseConfig(msg) +} + +func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + if override == nil { + return nil, fmt.Errorf("rbac: nil configuration message provided") + } + any, ok := override.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("rbac: error parsing override config %v: unknown type %T", override, override) + } + msg := new(rpb.RBACPerRoute) + if err := ptypes.UnmarshalAny(any, msg); err != nil { + return nil, fmt.Errorf("rbac: error parsing override config %v: %v", override, err) + } + return parseConfig(msg.Rbac) +} + +func (builder) IsTerminal() bool { + return false +} + +var _ 
httpfilter.ServerInterceptorBuilder = builder{} + +// BuildServerInterceptor is an optional interface builder implements in order +// to signify it works server side. +func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override httpfilter.FilterConfig) (resolver.ServerInterceptor, error) { + if cfg == nil { + return nil, fmt.Errorf("rbac: nil config provided") + } + + c, ok := cfg.(config) + if !ok { + return nil, fmt.Errorf("rbac: incorrect config type provided (%T): %v", cfg, cfg) + } + + if override != nil { + // override completely replaces the listener configuration; but we + // still validate the listener config type. + c, ok = override.(config) + if !ok { + return nil, fmt.Errorf("rbac: incorrect override config type provided (%T): %v", override, override) + } + } + + // RBAC HTTP Filter is a no op from one of these two cases: + // "If absent, no enforcing RBAC policy will be applied" - RBAC + // Documentation for Rules field. + // "At this time, if the RBAC.action is Action.LOG then the policy will be + // completely ignored, as if RBAC was not configurated." 
- A41 + if c.chainEngine == nil { + return nil, nil + } + return &interceptor{chainEngine: c.chainEngine}, nil +} + +type interceptor struct { + chainEngine *rbac.ChainEngine +} + +func (i *interceptor) AllowRPC(ctx context.Context) error { + return i.chainEngine.IsAuthorized(ctx) +} diff --git a/xds/internal/httpfilter/router/router.go b/xds/internal/httpfilter/router/router.go index 26e3acb5a4f4..1ac6518170fc 100644 --- a/xds/internal/httpfilter/router/router.go +++ b/xds/internal/httpfilter/router/router.go @@ -73,7 +73,14 @@ func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.Fil return config{}, nil } -var _ httpfilter.ClientInterceptorBuilder = builder{} +func (builder) IsTerminal() bool { + return true +} + +var ( + _ httpfilter.ClientInterceptorBuilder = builder{} + _ httpfilter.ServerInterceptorBuilder = builder{} +) func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ClientInterceptor, error) { if _, ok := cfg.(config); !ok { @@ -88,6 +95,18 @@ func (builder) BuildClientInterceptor(cfg, override httpfilter.FilterConfig) (ir return nil, nil } +func (builder) BuildServerInterceptor(cfg, override httpfilter.FilterConfig) (iresolver.ServerInterceptor, error) { + if _, ok := cfg.(config); !ok { + return nil, fmt.Errorf("router: incorrect config type provided (%T): %v", cfg, cfg) + } + if override != nil { + return nil, fmt.Errorf("router: unexpected override configuration specified: %v", override) + } + // The gRPC router is currently unimplemented on the server side. So we + // return a nil HTTPFilter, which will not be invoked. + return nil, nil +} + // The gRPC router filter does not currently support any configuration. Verify // type only. 
type config struct { diff --git a/xds/internal/internal.go b/xds/internal/internal.go index e4284ee02e0c..ba6fa3d78807 100644 --- a/xds/internal/internal.go +++ b/xds/internal/internal.go @@ -22,6 +22,8 @@ package internal import ( "encoding/json" "fmt" + + "google.golang.org/grpc/resolver" ) // LocalityID is xds.Locality without XXX fields, so it can be used as map @@ -44,6 +46,15 @@ func (l LocalityID) ToString() (string, error) { return string(b), nil } +// Equal allows the values to be compared by Attributes.Equal. +func (l LocalityID) Equal(o interface{}) bool { + ol, ok := o.(LocalityID) + if !ok { + return false + } + return l.Region == ol.Region && l.Zone == ol.Zone && l.SubZone == ol.SubZone +} + // LocalityIDFromString converts a json representation of locality, into a // LocalityID struct. func LocalityIDFromString(s string) (ret LocalityID, _ error) { @@ -53,3 +64,22 @@ func LocalityIDFromString(s string) (ret LocalityID, _ error) { } return ret, nil } + +type localityKeyType string + +const localityKey = localityKeyType("grpc.xds.internal.address.locality") + +// GetLocalityID returns the locality ID of addr. +func GetLocalityID(addr resolver.Address) LocalityID { + path, _ := addr.BalancerAttributes.Value(localityKey).(LocalityID) + return path +} + +// SetLocalityID sets locality ID in addr to l. +func SetLocalityID(addr resolver.Address, l LocalityID) resolver.Address { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(localityKey, l) + return addr +} + +// ResourceTypeMapForTesting maps TypeUrl to corresponding ResourceType. +var ResourceTypeMapForTesting map[string]interface{} diff --git a/xds/internal/resolver/cluster_specifier_plugin_test.go b/xds/internal/resolver/cluster_specifier_plugin_test.go new file mode 100644 index 000000000000..2a01beaeebc6 --- /dev/null +++ b/xds/internal/resolver/cluster_specifier_plugin_test.go @@ -0,0 +1,483 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +import ( + "context" + "encoding/json" + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/testutils" + xdsbootstrap "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/clustermanager" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" +) + +func init() { + balancer.Register(cspBalancerBuilder{}) + clusterspecifier.Register(testClusterSpecifierPlugin{}) +} + +// cspBalancerBuilder is a no-op LB policy which is referenced by the +// testClusterSpecifierPlugin. 
+type cspBalancerBuilder struct{} + +func (cspBalancerBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + return nil +} + +func (cspBalancerBuilder) Name() string { + return "csp_experimental" +} + +type cspBalancerConfig struct { + serviceconfig.LoadBalancingConfig + ArbitraryField string `json:"arbitrary_field"` +} + +func (cspBalancerBuilder) ParseConfig(lbCfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &cspBalancerConfig{} + if err := json.Unmarshal(lbCfg, cfg); err != nil { + return nil, err + } + return cfg, nil + +} + +// testClusterSpecifierPlugin is a test cluster specifier plugin which returns +// an LB policy configuration specifying the cspBalancer. +type testClusterSpecifierPlugin struct { +} + +func (testClusterSpecifierPlugin) TypeURLs() []string { + // The config for this plugin contains a wrapperspb.StringValue, and since + // we marshal that proto as an Any proto, the type URL on the latter gets + // set to "type.googleapis.com/google.protobuf.StringValue". If we wanted a + // more descriptive type URL for this test plugin, we would have to define a + // proto package with a message for the configuration. That would be + // overkill for a test. Therefore, this seems to be an acceptable tradeoff. 
+ return []string{"type.googleapis.com/google.protobuf.StringValue"} +} + +func (testClusterSpecifierPlugin) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.BalancerConfig, error) { + if cfg == nil { + return nil, fmt.Errorf("testClusterSpecifierPlugin: nil configuration message provided") + } + any, ok := cfg.(*anypb.Any) + if !ok { + return nil, fmt.Errorf("testClusterSpecifierPlugin: error parsing config %v: got type %T, want *anypb.Any", cfg, cfg) + } + lbCfg := new(wrapperspb.StringValue) + if err := ptypes.UnmarshalAny(any, lbCfg); err != nil { + return nil, fmt.Errorf("testClusterSpecifierPlugin: error parsing config %v: %v", cfg, err) + } + return []map[string]interface{}{{"csp_experimental": cspBalancerConfig{ArbitraryField: lbCfg.GetValue()}}}, nil +} + +// TestResolverClusterSpecifierPlugin tests the case where a route configuration +// containing cluster specifier plugins is sent by the management server. The +// test verifies that the service config output by the resolver contains the LB +// policy specified by the cluster specifier plugin, and the config selector +// returns the cluster associated with the cluster specifier plugin. +// +// The test also verifies that a change in the cluster specifier plugin config +// result in appropriate change in the service config pushed by the resolver. +func (s) TestResolverClusterSpecifierPlugin(t *testing.T) { + // Env var GRPC_EXPERIMENTAL_XDS_RLS_LB controls whether the xDS client + // allows routes with cluster specifier plugin as their route action. + oldRLS := envconfig.XDSRLS + envconfig.XDSRLS = true + defer func() { + envconfig.XDSRLS = oldRLS + }() + + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. 
+ nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + // Configure listener and route configuration resources on the management + // server. + const serviceName = "my-service-client-side-xds" + rdsName := "route-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: serviceName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeClusterSpecifierPlugin, + ClusterSpecifierPluginName: "cspA", + ClusterSpecifierPluginConfig: testutils.MarshalAny(&wrapperspb.StringValue{Value: "anything"}), + })}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + // Wait for an update from the resolver, and verify the service config. 
+ val, err := tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState := val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster_specifier_plugin:cspA": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "anything" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) + } + + cs := iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("Received nil config selector in update from resolver") + } + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) + if err != nil { + t.Fatalf("cs.SelectConfig(): %v", err) + } + + gotCluster := clustermanager.GetPickedClusterForTesting(res.Context) + wantCluster := "cluster_specifier_plugin:cspA" + if gotCluster != wantCluster { + t.Fatalf("config selector returned cluster: %v, want: %v", gotCluster, wantCluster) + } + + // Change the cluster specifier plugin configuration. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: serviceName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeClusterSpecifierPlugin, + ClusterSpecifierPluginName: "cspA", + ClusterSpecifierPluginConfig: testutils.MarshalAny(&wrapperspb.StringValue{Value: "changed"}), + })}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for an update from the resolver, and verify the service config. + val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState = val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster_specifier_plugin:cspA": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "changed" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) + } +} + +// TestXDSResolverDelayedOnCommittedCSP tests that cluster specifier plugins and +// their corresponding configurations remain in service config if RPCs are in +// flight. +func (s) TestXDSResolverDelayedOnCommittedCSP(t *testing.T) { + // Env var GRPC_EXPERIMENTAL_XDS_RLS_LB controls whether the xDS client + // allows routes with cluster specifier plugin as their route action. 
+ oldRLS := envconfig.XDSRLS + envconfig.XDSRLS = true + defer func() { + envconfig.XDSRLS = oldRLS + }() + + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + // Configure listener and route configuration resources on the management + // server. + const serviceName = "my-service-client-side-xds" + rdsName := "route-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: serviceName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeClusterSpecifierPlugin, + ClusterSpecifierPluginName: "cspA", + ClusterSpecifierPluginConfig: testutils.MarshalAny(&wrapperspb.StringValue{Value: "anythingA"}), + })}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + // Wait for an update from the resolver, and verify the service config. 
+ val, err := tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState := val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster_specifier_plugin:cspA": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "anythingA" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) + } + + cs := iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("Received nil config selector in update from resolver") + } + resOld, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) + if err != nil { + t.Fatalf("cs.SelectConfig(): %v", err) + } + + gotCluster := clustermanager.GetPickedClusterForTesting(resOld.Context) + wantCluster := "cluster_specifier_plugin:cspA" + if gotCluster != wantCluster { + t.Fatalf("config selector returned cluster: %v, want: %v", gotCluster, wantCluster) + } + + // Delay resOld.OnCommitted(). As long as there are pending RPCs to removed + // clusters, they still appear in the service config. + + // Change the cluster specifier plugin configuration. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: serviceName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeClusterSpecifierPlugin, + ClusterSpecifierPluginName: "cspB", + ClusterSpecifierPluginConfig: testutils.MarshalAny(&wrapperspb.StringValue{Value: "anythingB"}), + })}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Wait for an update from the resolver, and verify the service config. + val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState = val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster_specifier_plugin:cspA": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "anythingA" + } + } + ] + }, + "cluster_specifier_plugin:cspB": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "anythingB" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) + } + + // Perform an RPC and ensure that it is routed to the new cluster. 
+ cs = iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("Received nil config selector in update from resolver") + } + resNew, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) + if err != nil { + t.Fatalf("cs.SelectConfig(): %v", err) + } + + gotCluster = clustermanager.GetPickedClusterForTesting(resNew.Context) + wantCluster = "cluster_specifier_plugin:cspB" + if gotCluster != wantCluster { + t.Fatalf("config selector returned cluster: %v, want: %v", gotCluster, wantCluster) + } + + // Invoke resOld.OnCommitted; should lead to a service config update that deletes + // cspA. + resOld.OnCommitted() + + val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState = val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster_specifier_plugin:cspB": { + "childPolicy": [ + { + "csp_experimental": { + "arbitrary_field": "anythingB" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) + } +} diff --git a/xds/internal/resolver/matcher.go b/xds/internal/resolver/matcher.go deleted file mode 100644 index b7b5f3db0e3e..000000000000 --- a/xds/internal/resolver/matcher.go +++ /dev/null @@ -1,161 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package resolver - -import ( - "fmt" - "regexp" - "strings" - - "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/internal/grpcutil" - iresolver "google.golang.org/grpc/internal/resolver" - "google.golang.org/grpc/metadata" - xdsclient "google.golang.org/grpc/xds/internal/client" -) - -func routeToMatcher(r *xdsclient.Route) (*compositeMatcher, error) { - var pathMatcher pathMatcherInterface - switch { - case r.Regex != nil: - re, err := regexp.Compile(*r.Regex) - if err != nil { - return nil, fmt.Errorf("failed to compile regex %q", *r.Regex) - } - pathMatcher = newPathRegexMatcher(re) - case r.Path != nil: - pathMatcher = newPathExactMatcher(*r.Path, r.CaseInsensitive) - case r.Prefix != nil: - pathMatcher = newPathPrefixMatcher(*r.Prefix, r.CaseInsensitive) - default: - return nil, fmt.Errorf("illegal route: missing path_matcher") - } - - var headerMatchers []headerMatcherInterface - for _, h := range r.Headers { - var matcherT headerMatcherInterface - switch { - case h.ExactMatch != nil && *h.ExactMatch != "": - matcherT = newHeaderExactMatcher(h.Name, *h.ExactMatch) - case h.RegexMatch != nil && *h.RegexMatch != "": - re, err := regexp.Compile(*h.RegexMatch) - if err != nil { - return nil, fmt.Errorf("failed to compile regex %q, skipping this matcher", *h.RegexMatch) - } - matcherT = newHeaderRegexMatcher(h.Name, re) - case h.PrefixMatch != nil && *h.PrefixMatch != "": - matcherT = newHeaderPrefixMatcher(h.Name, *h.PrefixMatch) - case h.SuffixMatch != nil && *h.SuffixMatch != "": - matcherT = 
newHeaderSuffixMatcher(h.Name, *h.SuffixMatch) - case h.RangeMatch != nil: - matcherT = newHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End) - case h.PresentMatch != nil: - matcherT = newHeaderPresentMatcher(h.Name, *h.PresentMatch) - default: - return nil, fmt.Errorf("illegal route: missing header_match_specifier") - } - if h.InvertMatch != nil && *h.InvertMatch { - matcherT = newInvertMatcher(matcherT) - } - headerMatchers = append(headerMatchers, matcherT) - } - - var fractionMatcher *fractionMatcher - if r.Fraction != nil { - fractionMatcher = newFractionMatcher(*r.Fraction) - } - return newCompositeMatcher(pathMatcher, headerMatchers, fractionMatcher), nil -} - -// compositeMatcher.match returns true if all matchers return true. -type compositeMatcher struct { - pm pathMatcherInterface - hms []headerMatcherInterface - fm *fractionMatcher -} - -func newCompositeMatcher(pm pathMatcherInterface, hms []headerMatcherInterface, fm *fractionMatcher) *compositeMatcher { - return &compositeMatcher{pm: pm, hms: hms, fm: fm} -} - -func (a *compositeMatcher) match(info iresolver.RPCInfo) bool { - if a.pm != nil && !a.pm.match(info.Method) { - return false - } - - // Call headerMatchers even if md is nil, because routes may match - // non-presence of some headers. - var md metadata.MD - if info.Context != nil { - md, _ = metadata.FromOutgoingContext(info.Context) - if extraMD, ok := grpcutil.ExtraMetadata(info.Context); ok { - md = metadata.Join(md, extraMD) - // Remove all binary headers. They are hard to match with. May need - // to add back if asked by users. 
- for k := range md { - if strings.HasSuffix(k, "-bin") { - delete(md, k) - } - } - } - } - for _, m := range a.hms { - if !m.match(md) { - return false - } - } - - if a.fm != nil && !a.fm.match() { - return false - } - return true -} - -func (a *compositeMatcher) String() string { - var ret string - if a.pm != nil { - ret += a.pm.String() - } - for _, m := range a.hms { - ret += m.String() - } - if a.fm != nil { - ret += a.fm.String() - } - return ret -} - -type fractionMatcher struct { - fraction int64 // real fraction is fraction/1,000,000. -} - -func newFractionMatcher(fraction uint32) *fractionMatcher { - return &fractionMatcher{fraction: int64(fraction)} -} - -var grpcrandInt63n = grpcrand.Int63n - -func (fm *fractionMatcher) match() bool { - t := grpcrandInt63n(1000000) - return t <= fm.fraction -} - -func (fm *fractionMatcher) String() string { - return fmt.Sprintf("fraction:%v", fm.fraction) -} diff --git a/xds/internal/resolver/matcher_header.go b/xds/internal/resolver/matcher_header.go deleted file mode 100644 index 05a92788d7bf..000000000000 --- a/xds/internal/resolver/matcher_header.go +++ /dev/null @@ -1,188 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package resolver - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - "google.golang.org/grpc/metadata" -) - -type headerMatcherInterface interface { - match(metadata.MD) bool - String() string -} - -// mdValuesFromOutgoingCtx retrieves metadata from context. If there are -// multiple values, the values are concatenated with "," (comma and no space). -// -// All header matchers only match against the comma-concatenated string. -func mdValuesFromOutgoingCtx(md metadata.MD, key string) (string, bool) { - vs, ok := md[key] - if !ok { - return "", false - } - return strings.Join(vs, ","), true -} - -type headerExactMatcher struct { - key string - exact string -} - -func newHeaderExactMatcher(key, exact string) *headerExactMatcher { - return &headerExactMatcher{key: key, exact: exact} -} - -func (hem *headerExactMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hem.key) - if !ok { - return false - } - return v == hem.exact -} - -func (hem *headerExactMatcher) String() string { - return fmt.Sprintf("headerExact:%v:%v", hem.key, hem.exact) -} - -type headerRegexMatcher struct { - key string - re *regexp.Regexp -} - -func newHeaderRegexMatcher(key string, re *regexp.Regexp) *headerRegexMatcher { - return &headerRegexMatcher{key: key, re: re} -} - -func (hrm *headerRegexMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hrm.key) - if !ok { - return false - } - return hrm.re.MatchString(v) -} - -func (hrm *headerRegexMatcher) String() string { - return fmt.Sprintf("headerRegex:%v:%v", hrm.key, hrm.re.String()) -} - -type headerRangeMatcher struct { - key string - start, end int64 // represents [start, end). 
-} - -func newHeaderRangeMatcher(key string, start, end int64) *headerRangeMatcher { - return &headerRangeMatcher{key: key, start: start, end: end} -} - -func (hrm *headerRangeMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hrm.key) - if !ok { - return false - } - if i, err := strconv.ParseInt(v, 10, 64); err == nil && i >= hrm.start && i < hrm.end { - return true - } - return false -} - -func (hrm *headerRangeMatcher) String() string { - return fmt.Sprintf("headerRange:%v:[%d,%d)", hrm.key, hrm.start, hrm.end) -} - -type headerPresentMatcher struct { - key string - present bool -} - -func newHeaderPresentMatcher(key string, present bool) *headerPresentMatcher { - return &headerPresentMatcher{key: key, present: present} -} - -func (hpm *headerPresentMatcher) match(md metadata.MD) bool { - vs, ok := mdValuesFromOutgoingCtx(md, hpm.key) - present := ok && len(vs) > 0 - return present == hpm.present -} - -func (hpm *headerPresentMatcher) String() string { - return fmt.Sprintf("headerPresent:%v:%v", hpm.key, hpm.present) -} - -type headerPrefixMatcher struct { - key string - prefix string -} - -func newHeaderPrefixMatcher(key string, prefix string) *headerPrefixMatcher { - return &headerPrefixMatcher{key: key, prefix: prefix} -} - -func (hpm *headerPrefixMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hpm.key) - if !ok { - return false - } - return strings.HasPrefix(v, hpm.prefix) -} - -func (hpm *headerPrefixMatcher) String() string { - return fmt.Sprintf("headerPrefix:%v:%v", hpm.key, hpm.prefix) -} - -type headerSuffixMatcher struct { - key string - suffix string -} - -func newHeaderSuffixMatcher(key string, suffix string) *headerSuffixMatcher { - return &headerSuffixMatcher{key: key, suffix: suffix} -} - -func (hsm *headerSuffixMatcher) match(md metadata.MD) bool { - v, ok := mdValuesFromOutgoingCtx(md, hsm.key) - if !ok { - return false - } - return strings.HasSuffix(v, hsm.suffix) -} - -func (hsm 
*headerSuffixMatcher) String() string { - return fmt.Sprintf("headerSuffix:%v:%v", hsm.key, hsm.suffix) -} - -type invertMatcher struct { - m headerMatcherInterface -} - -func newInvertMatcher(m headerMatcherInterface) *invertMatcher { - return &invertMatcher{m: m} -} - -func (i *invertMatcher) match(md metadata.MD) bool { - return !i.m.match(md) -} - -func (i *invertMatcher) String() string { - return fmt.Sprintf("invert{%s}", i.m) -} diff --git a/xds/internal/resolver/serviceconfig.go b/xds/internal/resolver/serviceconfig.go index ef7c37128c13..d1dd79354ae0 100644 --- a/xds/internal/resolver/serviceconfig.go +++ b/xds/internal/resolver/serviceconfig.go @@ -22,23 +22,32 @@ import ( "context" "encoding/json" "fmt" + "math/bits" + "strings" "sync/atomic" "time" + xxhash "github.com/cespare/xxhash/v2" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/wrr" - "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/balancer/clustermanager" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const ( - cdsName = "cds_experimental" - xdsClusterManagerName = "xds_cluster_manager_experimental" + cdsName = "cds_experimental" + xdsClusterManagerName = "xds_cluster_manager_experimental" + clusterPrefix = "cluster:" + clusterSpecifierPluginPrefix = "cluster_specifier_plugin:" ) type serviceConfig struct { @@ -76,13 +85,11 @@ func (r *xdsResolver) pruneActiveClusters() { // serviceConfigJSON produces a service config in JSON format representing 
all // the clusters referenced in activeClusters. This includes clusters with zero // references, so they must be pruned first. -func serviceConfigJSON(activeClusters map[string]*clusterInfo) (string, error) { +func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) { // Generate children (all entries in activeClusters). children := make(map[string]xdsChildConfig) - for cluster := range activeClusters { - children[cluster] = xdsChildConfig{ - ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}), - } + for cluster, ci := range activeClusters { + children[cluster] = ci.cfg } sc := serviceConfig{ @@ -93,14 +100,16 @@ func serviceConfigJSON(activeClusters map[string]*clusterInfo) (string, error) { bs, err := json.Marshal(sc) if err != nil { - return "", fmt.Errorf("failed to marshal json: %v", err) + return nil, fmt.Errorf("failed to marshal json: %v", err) } - return string(bs), nil + return bs, nil } type virtualHost struct { // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig + // retry policy present in virtual host + retryConfig *xdsresource.RetryConfig } // routeCluster holds information about a cluster as referenced by a route. 
@@ -111,11 +120,13 @@ type routeCluster struct { } type route struct { - m *compositeMatcher // converted from route matchers - clusters wrr.WRR // holds *routeCluster entries + m *xdsresource.CompositeMatcher // converted from route matchers + clusters wrr.WRR // holds *routeCluster entries maxStreamDuration time.Duration // map from filter name to its config httpFilterConfigOverride map[string]httpfilter.FilterConfig + retryConfig *xdsresource.RetryConfig + hashPolicies []*xdsresource.HashPolicy } func (r route) String() string { @@ -127,7 +138,7 @@ type configSelector struct { virtualHost virtualHost routes []route clusters map[string]*clusterInfo - httpFilterConfig []xdsclient.HTTPFilter + httpFilterConfig []xdsresource.HTTPFilter } var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found") @@ -139,7 +150,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP var rt *route // Loop through routes in order and select first match. for _, r := range cs.routes { - if r.m.match(rpcInfo) { + if r.m.Match(rpcInfo) { rt = &r break } @@ -147,10 +158,12 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP if rt == nil || rt.clusters == nil { return nil, errNoMatchedRouteFound } + cluster, ok := rt.clusters.Next().(*routeCluster) if !ok { return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster) } + // Add a ref to the selected cluster, as this RPC needs this cluster until // it is committed. ref := &cs.clusters[cluster.name].refCount @@ -161,9 +174,15 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP return nil, err } + lbCtx := clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name) + // Request Hashes are only applicable for a Ring Hash LB. 
+ if envconfig.XDSRingHash { + lbCtx = ringhash.SetRequestHash(lbCtx, cs.generateHash(rpcInfo, rt.hashPolicies)) + } + config := &iresolver.RPCConfig{ - // Communicate to the LB policy the chosen cluster. - Context: clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name), + // Communicate to the LB policy the chosen cluster and request hash, if Ring Hash LB policy. + Context: lbCtx, OnCommitted: func() { // When the RPC is committed, the cluster is no longer required. // Decrease its ref. @@ -179,13 +198,82 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP Interceptor: interceptor, } - if env.TimeoutSupport && rt.maxStreamDuration != 0 { + if rt.maxStreamDuration != 0 { config.MethodConfig.Timeout = &rt.maxStreamDuration } + if rt.retryConfig != nil { + config.MethodConfig.RetryPolicy = retryConfigToPolicy(rt.retryConfig) + } else if cs.virtualHost.retryConfig != nil { + config.MethodConfig.RetryPolicy = retryConfigToPolicy(cs.virtualHost.retryConfig) + } return config, nil } +func retryConfigToPolicy(config *xdsresource.RetryConfig) *serviceconfig.RetryPolicy { + return &serviceconfig.RetryPolicy{ + MaxAttempts: int(config.NumRetries) + 1, + InitialBackoff: config.RetryBackoff.BaseInterval, + MaxBackoff: config.RetryBackoff.MaxInterval, + BackoffMultiplier: 2, + RetryableStatusCodes: config.RetryOn, + } +} + +func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsresource.HashPolicy) uint64 { + var hash uint64 + var generatedHash bool + for _, policy := range hashPolicies { + var policyHash uint64 + var generatedPolicyHash bool + switch policy.HashPolicyType { + case xdsresource.HashPolicyTypeHeader: + md, ok := metadata.FromOutgoingContext(rpcInfo.Context) + if !ok { + continue + } + values := md.Get(policy.HeaderName) + // If the header isn't present, no-op. 
+ if len(values) == 0 { + continue + } + joinedValues := strings.Join(values, ",") + if policy.Regex != nil { + joinedValues = policy.Regex.ReplaceAllString(joinedValues, policy.RegexSubstitution) + } + policyHash = xxhash.Sum64String(joinedValues) + generatedHash = true + generatedPolicyHash = true + case xdsresource.HashPolicyTypeChannelID: + // Use the static channel ID as the hash for this policy. + policyHash = cs.r.channelID + generatedHash = true + generatedPolicyHash = true + } + + // Deterministically combine the hash policies. Rotating prevents + // duplicate hash policies from cancelling each other out and preserves + // the 64 bits of entropy. + if generatedPolicyHash { + hash = bits.RotateLeft64(hash, 1) + hash = hash ^ policyHash + } + + // If terminal policy and a hash has already been generated, ignore the + // rest of the policies and use that hash already generated. + if policy.Terminal && generatedHash { + break + } + } + + if generatedHash { + return hash + } + // If no generated hash return a random long. In the grand scheme of things + // this logically will map to choosing a random backend to route request to. + return grpcrand.Uint64() +} + func (cs *configSelector) newInterceptor(rt *route, cluster *routeCluster) (iresolver.ClientInterceptor, error) { if len(cs.httpFilterConfig) == 0 { return nil, nil @@ -254,8 +342,11 @@ var newWRR = wrr.NewRandom // r.activeClusters for previously-unseen clusters. 
func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, error) { cs := &configSelector{ - r: r, - virtualHost: virtualHost{httpFilterConfigOverride: su.virtualHost.HTTPFilterConfigOverride}, + r: r, + virtualHost: virtualHost{ + httpFilterConfigOverride: su.virtualHost.HTTPFilterConfigOverride, + retryConfig: su.virtualHost.RetryConfig, + }, routes: make([]route, len(su.virtualHost.Routes)), clusters: make(map[string]*clusterInfo), httpFilterConfig: su.ldsConfig.httpFilterConfig, @@ -263,26 +354,30 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro for i, rt := range su.virtualHost.Routes { clusters := newWRR() - for cluster, wc := range rt.WeightedClusters { + if rt.ClusterSpecifierPlugin != "" { + clusterName := clusterSpecifierPluginPrefix + rt.ClusterSpecifierPlugin clusters.Add(&routeCluster{ - name: cluster, - httpFilterConfigOverride: wc.HTTPFilterConfigOverride, - }, int64(wc.Weight)) - - // Initialize entries in cs.clusters map, creating entries in - // r.activeClusters as necessary. Set to zero as they will be - // incremented by incRefs. 
- ci := r.activeClusters[cluster] - if ci == nil { - ci = &clusterInfo{refCount: 0} - r.activeClusters[cluster] = ci + name: clusterName, + }, 1) + cs.initializeCluster(clusterName, xdsChildConfig{ + ChildPolicy: balancerConfig(su.clusterSpecifierPlugins[rt.ClusterSpecifierPlugin]), + }) + } else { + for cluster, wc := range rt.WeightedClusters { + clusterName := clusterPrefix + cluster + clusters.Add(&routeCluster{ + name: clusterName, + httpFilterConfigOverride: wc.HTTPFilterConfigOverride, + }, int64(wc.Weight)) + cs.initializeCluster(clusterName, xdsChildConfig{ + ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}), + }) } - cs.clusters[cluster] = ci } cs.routes[i].clusters = clusters var err error - cs.routes[i].m, err = routeToMatcher(rt) + cs.routes[i].m, err = xdsresource.RouteToMatcher(rt) if err != nil { return nil, err } @@ -293,6 +388,8 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro } cs.routes[i].httpFilterConfigOverride = rt.HTTPFilterConfigOverride + cs.routes[i].retryConfig = rt.RetryConfig + cs.routes[i].hashPolicies = rt.HashPolicies } // Account for this config selector's clusters. Do this after no further @@ -305,9 +402,25 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro return cs, nil } +// initializeCluster initializes entries in cs.clusters map, creating entries in +// r.activeClusters as necessary. Any created entries will have a ref count set +// to zero as their ref count will be incremented by incRefs. 
+func (cs *configSelector) initializeCluster(clusterName string, cfg xdsChildConfig) { + ci := cs.r.activeClusters[clusterName] + if ci == nil { + ci = &clusterInfo{refCount: 0} + cs.r.activeClusters[clusterName] = ci + } + cs.clusters[clusterName] = ci + cs.clusters[clusterName].cfg = cfg +} + type clusterInfo struct { // number of references to this cluster; accessed atomically refCount int32 + // cfg is the child configuration for this cluster, containing either the + // csp config or the cds cluster config. + cfg xdsChildConfig } type interceptorList struct { diff --git a/xds/internal/resolver/serviceconfig_test.go b/xds/internal/resolver/serviceconfig_test.go index 1e253841e801..786b003c0154 100644 --- a/xds/internal/resolver/serviceconfig_test.go +++ b/xds/internal/resolver/serviceconfig_test.go @@ -19,10 +19,16 @@ package resolver import ( + "context" + "regexp" "testing" + xxhash "github.com/cespare/xxhash/v2" "github.com/google/go-cmp/cmp" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/metadata" _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) func (s) TestPruneActiveClusters(t *testing.T) { @@ -41,3 +47,72 @@ func (s) TestPruneActiveClusters(t *testing.T) { t.Fatalf("r.activeClusters = %v; want %v\nDiffs: %v", r.activeClusters, want, d) } } + +func (s) TestGenerateRequestHash(t *testing.T) { + const channelID = 12378921 + cs := &configSelector{ + r: &xdsResolver{ + cc: &testClientConn{}, + channelID: channelID, + }, + } + tests := []struct { + name string + hashPolicies []*xdsresource.HashPolicy + requestHashWant uint64 + rpcInfo iresolver.RPCInfo + }{ + // TestGenerateRequestHashHeaders tests generating request hashes for + // hash policies that specify to hash headers. 
+ { + name: "test-generate-request-hash-headers", + hashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeHeader, + HeaderName: ":path", + Regex: func() *regexp.Regexp { return regexp.MustCompile("/products") }(), // Will replace /products with /new-products, to test find and replace functionality. + RegexSubstitution: "/new-products", + }}, + requestHashWant: xxhash.Sum64String("/new-products"), + rpcInfo: iresolver.RPCInfo{ + Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs(":path", "/products")), + Method: "/some-method", + }, + }, + // TestGenerateHashChannelID tests generating request hashes for hash + // policies that specify to hash something that uniquely identifies the + // ClientConn (the pointer). + { + name: "test-generate-request-hash-channel-id", + hashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeChannelID, + }}, + requestHashWant: channelID, + rpcInfo: iresolver.RPCInfo{}, + }, + // TestGenerateRequestHashEmptyString tests generating request hashes + // for hash policies that specify to hash headers and replace empty + // strings in the headers. 
+ { + name: "test-generate-request-hash-empty-string", + hashPolicies: []*xdsresource.HashPolicy{{ + HashPolicyType: xdsresource.HashPolicyTypeHeader, + HeaderName: ":path", + Regex: func() *regexp.Regexp { return regexp.MustCompile("") }(), + RegexSubstitution: "e", + }}, + requestHashWant: xxhash.Sum64String("eaebece"), + rpcInfo: iresolver.RPCInfo{ + Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs(":path", "abc")), + Method: "/some-method", + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + requestHashGot := cs.generateHash(test.rpcInfo, test.hashPolicies) + if requestHashGot != test.requestHashWant { + t.Fatalf("requestHashGot = %v, requestHashWant = %v", requestHashGot, test.requestHashWant) + } + }) + } +} diff --git a/xds/internal/resolver/watch_service.go b/xds/internal/resolver/watch_service.go index 913ac4ced15c..4f8609ce9df5 100644 --- a/xds/internal/resolver/watch_service.go +++ b/xds/internal/resolver/watch_service.go @@ -20,12 +20,14 @@ package resolver import ( "fmt" - "strings" "sync" "time" "google.golang.org/grpc/internal/grpclog" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // serviceUpdate contains information received from the LDS/RDS responses which @@ -33,7 +35,10 @@ import ( // making a LDS to get the RouteConfig name. type serviceUpdate struct { // virtualHost contains routes and other configuration to route RPCs. - virtualHost *xdsclient.VirtualHost + virtualHost *xdsresource.VirtualHost + // clusterSpecifierPlugins contains the configurations for any cluster + // specifier plugins emitted by the xdsclient. + clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig // ldsConfig contains configuration that applies to all routes. 
ldsConfig ldsConfig } @@ -44,7 +49,7 @@ type ldsConfig struct { // maxStreamDuration is from the HTTP connection manager's // common_http_protocol_options field. maxStreamDuration time.Duration - httpFilterConfig []xdsclient.HTTPFilter + httpFilterConfig []xdsresource.HTTPFilter } // watchService uses LDS and RDS to discover information about the provided @@ -53,7 +58,11 @@ type ldsConfig struct { // Note that during race (e.g. an xDS response is received while the user is // calling cancel()), there's a small window where the callback can be called // after the watcher is canceled. The caller needs to handle this case. -func watchService(c xdsClientInterface, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { +// +// TODO(easwars): Make this function a method on the xdsResolver type. +// Currently, there is a single call site for this function, and all arguments +// passed to it are fields of the xdsResolver type. +func watchService(c xdsclient.XDSClient, serviceName string, cb func(serviceUpdate, error), logger *grpclog.PrefixLogger) (cancel func()) { w := &serviceUpdateWatcher{ logger: logger, c: c, @@ -69,7 +78,7 @@ func watchService(c xdsClientInterface, serviceName string, cb func(serviceUpdat // callback at the right time. 
type serviceUpdateWatcher struct { logger *grpclog.PrefixLogger - c xdsClientInterface + c xdsclient.XDSClient serviceName string ldsCancel func() serviceCb func(serviceUpdate, error) @@ -81,8 +90,8 @@ type serviceUpdateWatcher struct { rdsCancel func() } -func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, err error) { - w.logger.Infof("received LDS update: %+v, err: %v", update, err) +func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, err error) { + w.logger.Infof("received LDS update: %+v, err: %v", pretty.ToJSON(update), err) w.mu.Lock() defer w.mu.Unlock() if w.closed { @@ -93,7 +102,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er // type we check is ResourceNotFound, which indicates the LDS resource // was removed, and besides sending the error to callback, we also // cancel the RDS watch. - if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound && w.rdsCancel != nil { + if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound && w.rdsCancel != nil { w.rdsCancel() w.rdsName = "" w.rdsCancel = nil @@ -110,13 +119,37 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er httpFilterConfig: update.HTTPFilters, } + if update.InlineRouteConfig != nil { + // If there was an RDS watch, cancel it. + w.rdsName = "" + if w.rdsCancel != nil { + w.rdsCancel() + w.rdsCancel = nil + } + + // Handle the inline RDS update as if it's from an RDS watch. + w.applyRouteConfigUpdate(*update.InlineRouteConfig) + return + } + + // RDS name from update is not an empty string, need RDS to fetch the + // routes. + if w.rdsName == update.RouteConfigName { // If the new RouteConfigName is same as the previous, don't cancel and // restart the RDS watch. // // If the route name did change, then we must wait until the first RDS // update before reporting this LDS config. 
- w.serviceCb(w.lastUpdate, nil) + if w.lastUpdate.virtualHost != nil { + // We want to send an update with the new fields from the new LDS + // (e.g. max stream duration), and old fields from the previous + // RDS. + // + // But note that this should only happen when virtual host is set, + // which means an RDS was received. + w.serviceCb(w.lastUpdate, nil) + } return } w.rdsName = update.RouteConfigName @@ -126,8 +159,21 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er w.rdsCancel = w.c.WatchRouteConfig(update.RouteConfigName, w.handleRDSResp) } -func (w *serviceUpdateWatcher) handleRDSResp(update xdsclient.RouteConfigUpdate, err error) { - w.logger.Infof("received RDS update: %+v, err: %v", update, err) +func (w *serviceUpdateWatcher) applyRouteConfigUpdate(update xdsresource.RouteConfigUpdate) { + matchVh := xdsresource.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) + if matchVh == nil { + // No matching virtual host found. + w.serviceCb(serviceUpdate{}, fmt.Errorf("no matching virtual host found for %q", w.serviceName)) + return + } + + w.lastUpdate.virtualHost = matchVh + w.lastUpdate.clusterSpecifierPlugins = update.ClusterSpecifierPlugins + w.serviceCb(w.lastUpdate, nil) +} + +func (w *serviceUpdateWatcher) handleRDSResp(update xdsresource.RouteConfigUpdate, err error) { + w.logger.Infof("received RDS update: %+v, err: %v", pretty.ToJSON(update), err) w.mu.Lock() defer w.mu.Unlock() if w.closed { @@ -142,16 +188,7 @@ func (w *serviceUpdateWatcher) handleRDSResp(update xdsclient.RouteConfigUpdate, w.serviceCb(serviceUpdate{}, err) return } - - matchVh := findBestMatchingVirtualHost(w.serviceName, update.VirtualHosts) - if matchVh == nil { - // No matching virtual host found. 
- w.serviceCb(serviceUpdate{}, fmt.Errorf("no matching virtual host found for %q", w.serviceName)) - return - } - - w.lastUpdate.virtualHost = matchVh - w.serviceCb(w.lastUpdate, nil) + w.applyRouteConfigUpdate(update) } func (w *serviceUpdateWatcher) close() { @@ -164,97 +201,3 @@ func (w *serviceUpdateWatcher) close() { w.rdsCancel = nil } } - -type domainMatchType int - -const ( - domainMatchTypeInvalid domainMatchType = iota - domainMatchTypeUniversal - domainMatchTypePrefix - domainMatchTypeSuffix - domainMatchTypeExact -) - -// Exact > Suffix > Prefix > Universal > Invalid. -func (t domainMatchType) betterThan(b domainMatchType) bool { - return t > b -} - -func matchTypeForDomain(d string) domainMatchType { - if d == "" { - return domainMatchTypeInvalid - } - if d == "*" { - return domainMatchTypeUniversal - } - if strings.HasPrefix(d, "*") { - return domainMatchTypeSuffix - } - if strings.HasSuffix(d, "*") { - return domainMatchTypePrefix - } - if strings.Contains(d, "*") { - return domainMatchTypeInvalid - } - return domainMatchTypeExact -} - -func match(domain, host string) (domainMatchType, bool) { - switch typ := matchTypeForDomain(domain); typ { - case domainMatchTypeInvalid: - return typ, false - case domainMatchTypeUniversal: - return typ, true - case domainMatchTypePrefix: - // abc.* - return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*")) - case domainMatchTypeSuffix: - // *.123 - return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*")) - case domainMatchTypeExact: - return typ, domain == host - default: - return domainMatchTypeInvalid, false - } -} - -// findBestMatchingVirtualHost returns the virtual host whose domains field best -// matches host -// -// The domains field support 4 different matching pattern types: -// - Exact match -// - Suffix match (e.g. “*ABC”) -// - Prefix match (e.g. “ABC*) -// - Universal match (e.g. 
“*”) -// -// The best match is defined as: -// - A match is better if it’s matching pattern type is better -// - Exact match > suffix match > prefix match > universal match -// - If two matches are of the same pattern type, the longer match is better -// - This is to compare the length of the matching pattern, e.g. “*ABCDE” > -// “*ABC” -func findBestMatchingVirtualHost(host string, vHosts []*xdsclient.VirtualHost) *xdsclient.VirtualHost { - var ( - matchVh *xdsclient.VirtualHost - matchType = domainMatchTypeInvalid - matchLen int - ) - for _, vh := range vHosts { - for _, domain := range vh.Domains { - typ, matched := match(domain, host) - if typ == domainMatchTypeInvalid { - // The rds response is invalid. - return nil - } - if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { - // The previous match has better type, or the previous match has - // better length, or this domain isn't a match. - continue - } - matchVh = vh - matchType = typ - matchLen = len(domain) - } - } - return matchVh -} diff --git a/xds/internal/resolver/watch_service_test.go b/xds/internal/resolver/watch_service_test.go index 705a3d35ae1b..1a4b45bc8ad2 100644 --- a/xds/internal/resolver/watch_service_test.go +++ b/xds/internal/resolver/watch_service_test.go @@ -27,85 +27,39 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/testutils" - xdsclient "google.golang.org/grpc/xds/internal/client" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/proto" ) -func (s) TestMatchTypeForDomain(t *testing.T) { - tests := []struct { - d string - want domainMatchType - }{ - {d: "", want: domainMatchTypeInvalid}, - {d: "*", want: domainMatchTypeUniversal}, - {d: "bar.*", want: domainMatchTypePrefix}, - {d: "*.abc.com", want: domainMatchTypeSuffix}, - {d: "foo.bar.com", want: 
domainMatchTypeExact}, - {d: "foo.*.com", want: domainMatchTypeInvalid}, - } - for _, tt := range tests { - if got := matchTypeForDomain(tt.d); got != tt.want { - t.Errorf("matchTypeForDomain(%q) = %v, want %v", tt.d, got, tt.want) - } - } -} - -func (s) TestMatch(t *testing.T) { - tests := []struct { - name string - domain string - host string - wantTyp domainMatchType - wantMatched bool - }{ - {name: "invalid-empty", domain: "", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false}, - {name: "invalid", domain: "a.*.b", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false}, - {name: "universal", domain: "*", host: "abc.com", wantTyp: domainMatchTypeUniversal, wantMatched: true}, - {name: "prefix-match", domain: "abc.*", host: "abc.123", wantTyp: domainMatchTypePrefix, wantMatched: true}, - {name: "prefix-no-match", domain: "abc.*", host: "abcd.123", wantTyp: domainMatchTypePrefix, wantMatched: false}, - {name: "suffix-match", domain: "*.123", host: "abc.123", wantTyp: domainMatchTypeSuffix, wantMatched: true}, - {name: "suffix-no-match", domain: "*.123", host: "abc.1234", wantTyp: domainMatchTypeSuffix, wantMatched: false}, - {name: "exact-match", domain: "foo.bar", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: true}, - {name: "exact-no-match", domain: "foo.bar.com", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if gotTyp, gotMatched := match(tt.domain, tt.host); gotTyp != tt.wantTyp || gotMatched != tt.wantMatched { - t.Errorf("match() = %v, %v, want %v, %v", gotTyp, gotMatched, tt.wantTyp, tt.wantMatched) - } - }) - } -} - func (s) TestFindBestMatchingVirtualHost(t *testing.T) { var ( - oneExactMatch = &xdsclient.VirtualHost{ + oneExactMatch = &xdsresource.VirtualHost{ Domains: []string{"foo.bar.com"}, } - oneSuffixMatch = &xdsclient.VirtualHost{ + oneSuffixMatch = &xdsresource.VirtualHost{ Domains: []string{"*.bar.com"}, } - 
onePrefixMatch = &xdsclient.VirtualHost{ + onePrefixMatch = &xdsresource.VirtualHost{ Domains: []string{"foo.bar.*"}, } - oneUniversalMatch = &xdsclient.VirtualHost{ + oneUniversalMatch = &xdsresource.VirtualHost{ Domains: []string{"*"}, } - longExactMatch = &xdsclient.VirtualHost{ + longExactMatch = &xdsresource.VirtualHost{ Domains: []string{"v2.foo.bar.com"}, } - multipleMatch = &xdsclient.VirtualHost{ + multipleMatch = &xdsresource.VirtualHost{ Domains: []string{"pi.foo.bar.com", "314.*", "*.159"}, } - vhs = []*xdsclient.VirtualHost{oneExactMatch, oneSuffixMatch, onePrefixMatch, oneUniversalMatch, longExactMatch, multipleMatch} + vhs = []*xdsresource.VirtualHost{oneExactMatch, oneSuffixMatch, onePrefixMatch, oneUniversalMatch, longExactMatch, multipleMatch} ) tests := []struct { name string host string - vHosts []*xdsclient.VirtualHost - want *xdsclient.VirtualHost + vHosts []*xdsresource.VirtualHost + want *xdsresource.VirtualHost }{ {name: "exact-match", host: "foo.bar.com", vHosts: vhs, want: oneExactMatch}, {name: "suffix-match", host: "123.bar.com", vHosts: vhs, want: oneSuffixMatch}, @@ -121,7 +75,7 @@ func (s) TestFindBestMatchingVirtualHost(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := findBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { + if got := xdsresource.FindBestMatchingVirtualHost(tt.host, tt.vHosts); !cmp.Equal(got, tt.want, cmp.Comparer(proto.Equal)) { t.Errorf("findBestMatchingxdsclient.VirtualHost() = %v, want %v", got, tt.want) } }) @@ -163,15 +117,15 @@ func (s) TestServiceWatch(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, 
t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -179,22 +133,22 @@ func (s) TestServiceWatch(t *testing.T) { t.Fatal(err) } - wantUpdate2 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, - Routes: []*xdsclient.Route{{ + wantUpdate2 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, + Routes: []*xdsresource.Route{{ Path: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}, }}, }} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Path: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Path: newStringP(""), 
WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, { // Another virtual host, with different domains. Domains: []string{"random"}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -217,15 +171,15 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -234,19 +188,19 @@ func (s) TestServiceWatchLDSUpdate(t *testing.T) { } // Another LDS 
update with a different RDS_name. - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr + "2"}, nil) - if err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr + "2"}, nil) + if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { t.Fatalf("wait for cancel route watch failed: %v, want nil", err) } waitForWatchRouteConfig(ctx, t, xdsC, routeStr+"2") // RDS update for the new name. - wantUpdate2 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}}} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + wantUpdate2 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback(routeStr+"2", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}, }, }, }, nil) @@ -269,19 +223,19 @@ func (s) TestServiceWatchLDSUpdateMaxStreamDuration(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: 
routeStr, MaxStreamDuration: time.Second}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}, ldsConfig: ldsConfig{maxStreamDuration: time.Second}, } - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -290,22 +244,22 @@ func (s) TestServiceWatchLDSUpdateMaxStreamDuration(t *testing.T) { } // Another LDS update with the same RDS_name but different MaxStreamDuration (zero in this case). 
- wantUpdate2 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}}} - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + wantUpdate2 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate2); err != nil { t.Fatal(err) } // RDS update. - wantUpdate3 := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{ + wantUpdate3 := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}, }} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster + "2": {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster + "2": {Weight: 1}}}}, }, }, }, nil) @@ -328,18 +282,18 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) 
- xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - wantUpdate := serviceUpdate{virtualHost: &xdsclient.VirtualHost{Domains: []string{"target"}, Routes: []*xdsclient.Route{{ + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{ Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }} - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*xdsclient.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, }, }, }, nil) @@ -349,10 +303,88 @@ func (s) TestServiceNotCancelRDSOnSameLDSUpdate(t *testing.T) { } // Another LDS update with a the same RDS_name. 
- xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr}, nil) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) defer sCancel() - if err := xdsC.WaitForCancelRouteConfigWatch(sCtx); err != context.DeadlineExceeded { + if _, err := xdsC.WaitForCancelRouteConfigWatch(sCtx); err != context.DeadlineExceeded { t.Fatalf("wait for cancel route watch failed: %v, want nil", err) } } + +// TestServiceWatchInlineRDS covers the cases switching between: +// - LDS update contains RDS name to watch +// - LDS update contains inline RDS resource +func (s) TestServiceWatchInlineRDS(t *testing.T) { + serviceUpdateCh := testutils.NewChannel() + xdsC := fakeclient.NewClient() + cancelWatch := watchService(xdsC, targetStr, func(update serviceUpdate, err error) { + serviceUpdateCh.Send(serviceUpdateErr{u: update, err: err}) + }, nil) + defer cancelWatch() + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // First LDS update is LDS with RDS name to watch. 
+ waitForWatchListener(ctx, t, xdsC, targetStr) + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) + waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + wantUpdate := serviceUpdate{virtualHost: &xdsresource.VirtualHost{Domains: []string{"target"}, Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}}} + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, + }, + }, + }, nil) + if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // Switch LDS resp to a LDS with inline RDS resource + wantVirtualHosts2 := &xdsresource.VirtualHost{Domains: []string{"target"}, + Routes: []*xdsresource.Route{{ + Path: newStringP(""), + WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}, + }}, + } + wantUpdate2 := serviceUpdate{virtualHost: wantVirtualHosts2} + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{wantVirtualHosts2}, + }}, nil) + // This inline RDS resource should cause the RDS watch to be canceled. + if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { + t.Fatalf("wait for cancel route watch failed: %v, want nil", err) + } + if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate2); err != nil { + t.Fatal(err) + } + + // Switch LDS update back to LDS with RDS name to watch. 
+ xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{RouteConfigName: routeStr}, nil) + waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{targetStr}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsresource.WeightedCluster{cluster: {Weight: 1}}}}, + }, + }, + }, nil) + if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // Switch LDS resp to a LDS with inline RDS resource again. + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{InlineRouteConfig: &xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{wantVirtualHosts2}, + }}, nil) + // This inline RDS resource should cause the RDS watch to be canceled. + if _, err := xdsC.WaitForCancelRouteConfigWatch(ctx); err != nil { + t.Fatalf("wait for cancel route watch failed: %v, want nil", err) + } + if err := verifyServiceUpdate(ctx, serviceUpdateCh, wantUpdate2); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/resolver/xds_resolver.go b/xds/internal/resolver/xds_resolver.go index d8c09db69b5a..09b3356301db 100644 --- a/xds/internal/resolver/xds_resolver.go +++ b/xds/internal/resolver/xds_resolver.go @@ -22,48 +22,81 @@ package resolver import ( "errors" "fmt" + "strings" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/client/bootstrap" - + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/xds/internal/xdsclient" + 
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const xdsScheme = "xds" +// newBuilderForTesting creates a new xds resolver builder using a specific xds +// bootstrap config, so tests can use multiple xds clients in different +// ClientConns at the same time. +func newBuilderForTesting(config []byte) (resolver.Builder, error) { + return &xdsResolverBuilder{ + newXDSClient: func() (xdsclient.XDSClient, func(), error) { + return xdsclient.NewWithBootstrapContentsForTesting(config) + }, + }, nil +} + // For overriding in unittests. -var newXDSClient = func() (xdsClientInterface, error) { return xdsclient.New() } +var newXDSClient = func() (xdsclient.XDSClient, func(), error) { return xdsclient.New() } func init() { resolver.Register(&xdsResolverBuilder{}) + internal.NewXDSResolverWithConfigForTesting = newBuilderForTesting } -type xdsResolverBuilder struct{} +type xdsResolverBuilder struct { + newXDSClient func() (xdsclient.XDSClient, func(), error) +} // Build helps implement the resolver.Builder interface. // // The xds bootstrap process is performed (and a new xds client is built) every // time an xds resolver is built. 
-func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { +func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) { r := &xdsResolver{ - target: t, cc: cc, closed: grpcsync.NewEvent(), updateCh: make(chan suWithError, 1), activeClusters: make(map[string]*clusterInfo), + channelID: grpcrand.Uint64(), + } + defer func() { + if retErr != nil { + r.Close() + } + }() + r.logger = prefixLogger(r) + r.logger.Infof("Creating resolver for target: %+v", target) + + newXDSClient := newXDSClient + if b.newXDSClient != nil { + newXDSClient = b.newXDSClient } - r.logger = prefixLogger((r)) - r.logger.Infof("Creating resolver for target: %+v", t) - client, err := newXDSClient() + client, close, err := newXDSClient() if err != nil { return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) } - r.client = client + r.xdsClient = client + r.xdsClientClose = close + bootstrapConfig := client.BootstrapConfig() + if bootstrapConfig == nil { + return nil, errors.New("bootstrap configuration is empty") + } // If xds credentials were specified by the user, but bootstrap configs do // not contain any certificate provider configuration, it is better to fail @@ -77,18 +110,40 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op creds = opts.CredsBundle.TransportCredentials() } if xc, ok := creds.(interface{ UsesXDS() bool }); ok && xc.UsesXDS() { - bc := client.BootstrapConfig() - if len(bc.CertProviderConfigs) == 0 { + if len(bootstrapConfig.CertProviderConfigs) == 0 { return nil, errors.New("xds: xdsCreds specified but certificate_providers config missing in bootstrap file") } } - // Register a watch on the xdsClient for the user's dial target. 
- cancelWatch := watchService(r.client, r.target.Endpoint, r.handleServiceUpdate, r.logger) - r.logger.Infof("Watch started on resource name %v with xds-client %p", r.target.Endpoint, r.client) + // Find the client listener template to use from the bootstrap config: + // - If authority is not set in the target, use the top level template + // - If authority is set, use the template from the authority map. + template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate + if authority := target.URL.Host; authority != "" { + a := bootstrapConfig.Authorities[authority] + if a == nil { + return nil, fmt.Errorf("xds: authority %q is not found in the bootstrap file", authority) + } + if a.ClientListenerResourceNameTemplate != "" { + // This check will never be false, because + // ClientListenerResourceNameTemplate is required to start with + // xdstp://, and has a default value (not an empty string) if unset. + template = a.ClientListenerResourceNameTemplate + } + } + endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + endpoint = strings.TrimPrefix(endpoint, "/") + r.ldsResourceName = bootstrap.PopulateResourceTemplate(template, endpoint) + + // Register a watch on the xdsClient for the resource name determined above. + cancelWatch := watchService(r.xdsClient, r.ldsResourceName, r.handleServiceUpdate, r.logger) + r.logger.Infof("Watch started on resource name %v with xds-client %p", r.ldsResourceName, r.xdsClient) r.cancelWatch = func() { cancelWatch() - r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.target.Endpoint, r.client) + r.logger.Infof("Watch cancel on resource name %v with xds-client %p", r.ldsResourceName, r.xdsClient) } go r.run() @@ -100,15 +155,6 @@ func (*xdsResolverBuilder) Scheme() string { return xdsScheme } -// xdsClientInterface contains methods from xdsClient.Client which are used by -// the resolver. This will be faked out in unittests. 
-type xdsClientInterface interface { - WatchListener(serviceName string, cb func(xdsclient.ListenerUpdate, error)) func() - WatchRouteConfig(routeName string, cb func(xdsclient.RouteConfigUpdate, error)) func() - BootstrapConfig() *bootstrap.Config - Close() -} - // suWithError wraps the ServiceUpdate and error received through a watch API // callback, so that it can pushed onto the update channel as a single entity. type suWithError struct { @@ -123,14 +169,14 @@ type suWithError struct { // (which performs LDS/RDS queries for the same), and passes the received // updates to the ClientConn. type xdsResolver struct { - target resolver.Target - cc resolver.ClientConn - closed *grpcsync.Event - - logger *grpclog.PrefixLogger + cc resolver.ClientConn + closed *grpcsync.Event + logger *grpclog.PrefixLogger + ldsResourceName string // The underlying xdsClient which performs all xDS requests and responses. - client xdsClientInterface + xdsClient xdsclient.XDSClient + xdsClientClose func() // A channel for the watch API callback to write service updates on to. The // updates are read by the run goroutine and passed on to the ClientConn. updateCh chan suWithError @@ -142,6 +188,10 @@ type xdsResolver struct { activeClusters map[string]*clusterInfo curConfigSelector *configSelector + + // A random number which uniquely identifies the channel which owns this + // resolver. + channelID uint64 } // sendNewServiceConfig prunes active clusters, generates a new service config @@ -163,7 +213,6 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { return true } - // Produce the service config. sc, err := serviceConfigJSON(r.activeClusters) if err != nil { // JSON marshal error; should never happen. 
@@ -171,13 +220,13 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool { r.cc.ReportError(err) return false } - r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.target.Endpoint, r.client, sc) + r.logger.Infof("Received update on resource %v from xds-client %p, generated service config: %v", r.ldsResourceName, r.xdsClient, pretty.FormatJSON(sc)) // Send the update to the ClientConn. state := iresolver.SetConfigSelector(resolver.State{ - ServiceConfig: r.cc.ParseServiceConfig(sc), + ServiceConfig: r.cc.ParseServiceConfig(string(sc)), }, cs) - r.cc.UpdateState(state) + r.cc.UpdateState(xdsclient.SetClient(state, r.xdsClient)) return true } @@ -190,8 +239,8 @@ func (r *xdsResolver) run() { return case update := <-r.updateCh: if update.err != nil { - r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.target.Endpoint, r.client, update.err) - if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound { + r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.ldsResourceName, r.xdsClient, update.err) + if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { // If error is resource-not-found, it means the LDS // resource was removed. Ultimately send an empty service // config, which picks pick-first, with no address, and @@ -218,7 +267,7 @@ func (r *xdsResolver) run() { // Create the config selector for this update. cs, err := r.newConfigSelector(update.su) if err != nil { - r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.target.Endpoint, r.client, err) + r.logger.Warningf("Error parsing update on resource %v from xds-client %p: %v", r.ldsResourceName, r.xdsClient, err) r.cc.ReportError(err) continue } @@ -260,8 +309,15 @@ func (*xdsResolver) ResolveNow(o resolver.ResolveNowOptions) {} // Close closes the resolver, and also closes the underlying xdsClient. 
func (r *xdsResolver) Close() { - r.cancelWatch() - r.client.Close() + // Note that Close needs to check for nils even if some of them are always + // set in the constructor. This is because the constructor defers Close() in + // error cases, and the fields might not be set when the error happens. + if r.cancelWatch != nil { + r.cancelWatch() + } + if r.xdsClientClose != nil { + r.xdsClientClose() + } r.closed.Fire() r.logger.Infof("Shutdown") } diff --git a/xds/internal/resolver/xds_resolver_test.go b/xds/internal/resolver/xds_resolver_test.go index 36c7416cb436..43b835fada93 100644 --- a/xds/internal/resolver/xds_resolver_test.go +++ b/xds/internal/resolver/xds_resolver_test.go @@ -21,48 +21,67 @@ package resolver import ( "context" "errors" + "net/url" "reflect" "strings" "testing" "time" + xxhash "github.com/cespare/xxhash/v2" + "github.com/envoyproxy/go-control-plane/pkg/wellknown" "github.com/google/go-cmp/cmp" + "github.com/google/uuid" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" xdscreds "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpctest" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/testutils" + xdsbootstrap "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" "google.golang.org/grpc/internal/wrr" - "google.golang.org/grpc/internal/xds/env" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" - _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config "google.golang.org/grpc/xds/internal/balancer/clustermanager" - 
"google.golang.org/grpc/xds/internal/client" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/balancer/ringhash" "google.golang.org/grpc/xds/internal/httpfilter" "google.golang.org/grpc/xds/internal/httpfilter/router" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + + _ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // To parse LB config ) const ( targetStr = "target" routeStr = "route" cluster = "cluster" - defaultTestTimeout = 1 * time.Second + defaultTestTimeout = 10 * time.Second defaultTestShortTimeout = 100 * time.Microsecond ) -var target = resolver.Target{Endpoint: targetStr} +var target = resolver.Target{URL: *testutils.MustParseURL("xds:///" + targetStr)} -var routerFilter = xdsclient.HTTPFilter{Name: "rtr", Filter: httpfilter.Get(router.TypeURL)} -var routerFilterList = []xdsclient.HTTPFilter{routerFilter} +var routerFilter = 
xdsresource.HTTPFilter{Name: "rtr", Filter: httpfilter.Get(router.TypeURL)} type s struct { grpctest.Tester @@ -73,31 +92,37 @@ func Test(t *testing.T) { } func (s) TestRegister(t *testing.T) { - b := resolver.Get(xdsScheme) - if b == nil { + if resolver.Get(xdsScheme) == nil { t.Errorf("scheme %v is not registered", xdsScheme) } } -// testClientConn is a fake implemetation of resolver.ClientConn. All is does -// is to store the state received from the resolver locally and signal that -// event through a channel. +// testClientConn is a fake implemetation of resolver.ClientConn that pushes +// state updates and errors returned by the resolver on to channels for +// consumption by tests. type testClientConn struct { resolver.ClientConn stateCh *testutils.Channel errorCh *testutils.Channel } -func (t *testClientConn) UpdateState(s resolver.State) { +func (t *testClientConn) UpdateState(s resolver.State) error { + // Tests should ideally consume all state updates, and if one happens + // unexpectedly, tests should catch it. Hence using `Send()` here. t.stateCh.Send(s) + return nil } func (t *testClientConn) ReportError(err error) { - t.errorCh.Send(err) + // When used with a go-control-plane management server that continuously + // resends resources which are NACKed by the xDS client, using a `Replace()` + // here simplifies tests which will have access to the most recently + // received error. + t.errorCh.Replace(err) } func (t *testClientConn) ParseServiceConfig(jsonSC string) *serviceconfig.ParseResult { - return internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(jsonSC) + return internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(jsonSC) } func newTestClientConn() *testClientConn { @@ -107,36 +132,100 @@ func newTestClientConn() *testClientConn { } } -// TestResolverBuilder tests the xdsResolverBuilder's Build method with -// different parameters. 
-func (s) TestResolverBuilder(t *testing.T) { +// TestResolverBuilder_ClientCreationFails tests the case where xDS client +// creation fails, and verifies that xDS resolver build fails as well. +func (s) TestResolverBuilder_ClientCreationFails(t *testing.T) { + // Override xDS client creation function and return an error. + origNewClient := newXDSClient + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + return nil, nil, errors.New("failed to create xDS client") + } + defer func() { + newXDSClient = origNewClient + }() + + // Build an xDS resolver and expect it to fail. + builder := resolver.Get(xdsScheme) + if builder == nil { + t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) + } + if _, err := builder.Build(target, newTestClientConn(), resolver.BuildOptions{}); err == nil { + t.Fatalf("builder.Build(%v) succeeded when expected to fail", target) + } +} + +// TestResolverBuilder_DifferentBootstrapConfigs tests the resolver builder's +// Build() method with different xDS bootstrap configurations. +func (s) TestResolverBuilder_DifferentBootstrapConfigs(t *testing.T) { tests := []struct { - name string - xdsClientFunc func() (xdsClientInterface, error) - wantErr bool + name string + bootstrapCfg *bootstrap.Config // Empty top-level xDS server config, will be set by test logic. 
+ target resolver.Target + buildOpts resolver.BuildOptions + wantErr string }{ { - name: "simple-good", - xdsClientFunc: func() (xdsClientInterface, error) { - return fakeclient.NewClient(), nil + name: "good", + bootstrapCfg: &bootstrap.Config{}, + target: target, + }, + { + name: "authority not defined in bootstrap", + bootstrapCfg: &bootstrap.Config{ + ClientDefaultListenerResourceNameTemplate: "%s", + Authorities: map[string]*bootstrap.Authority{ + "test-authority": { + ClientListenerResourceNameTemplate: "xdstp://test-authority/%s", + }, + }, }, - wantErr: false, + target: resolver.Target{ + URL: url.URL{ + Host: "non-existing-authority", + Path: "/" + targetStr, + }, + }, + wantErr: `authority "non-existing-authority" is not found in the bootstrap file`, }, { - name: "newXDSClient-throws-error", - xdsClientFunc: func() (xdsClientInterface, error) { - return nil, errors.New("newXDSClient-throws-error") + name: "xDS creds specified without certificate providers in bootstrap", + bootstrapCfg: &bootstrap.Config{}, + target: target, + buildOpts: resolver.BuildOptions{ + DialCreds: func() credentials.TransportCredentials { + creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{FallbackCreds: insecure.NewCredentials()}) + if err != nil { + t.Fatalf("xds.NewClientCredentials() failed: %v", err) + } + return creds + }(), }, - wantErr: true, + wantErr: `xdsCreds specified but certificate_providers config missing in bootstrap file`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - // Fake out the xdsClient creation process by providing a fake. - oldClientMaker := newXDSClient - newXDSClient = test.xdsClientFunc + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Starting xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Add top-level xDS server config corresponding to the above + // management server. 
+ test.bootstrapCfg.XDSServer = xdstestutils.ServerConfigForAddress(t, mgmtServer.Address) + + // Override xDS client creation to use bootstrap configuration + // specified by the test. + origNewClient := newXDSClient + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + // The watch timeout and idle authority timeout values passed to + // NewWithConfigForTesing() are immaterial for this test, as we + // are only testing the resolver build functionality. + return xdsclient.NewWithConfigForTesting(test.bootstrapCfg, defaultTestTimeout, defaultTestTimeout) + } defer func() { - newXDSClient = oldClientMaker + newXDSClient = origNewClient }() builder := resolver.Get(xdsScheme) @@ -144,8 +233,11 @@ func (s) TestResolverBuilder(t *testing.T) { t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) } - r, err := builder.Build(target, newTestClientConn(), resolver.BuildOptions{}) - if (err != nil) != test.wantErr { + r, err := builder.Build(test.target, newTestClientConn(), test.buildOpts) + if gotErr, wantErr := err != nil, test.wantErr != ""; gotErr != wantErr { + t.Fatalf("builder.Build(%v) returned err: %v, wantErr: %v", target, err, test.wantErr) + } + if test.wantErr != "" && !strings.Contains(err.Error(), test.wantErr) { t.Fatalf("builder.Build(%v) returned err: %v, wantErr: %v", target, err, test.wantErr) } if err != nil { @@ -157,64 +249,47 @@ func (s) TestResolverBuilder(t *testing.T) { } } -// TestResolverBuilder_xdsCredsBootstrapMismatch tests the case where an xds -// resolver is built with xds credentials being specified by the user. The -// bootstrap file does not contain any certificate provider configuration -// though, and therefore we expect the resolver build to fail. -func (s) TestResolverBuilder_xdsCredsBootstrapMismatch(t *testing.T) { - // Fake out the xdsClient creation process by providing a fake, which does - // not have any certificate provider configuration. 
- oldClientMaker := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { - fc := fakeclient.NewClient() - fc.SetBootstrapConfig(&bootstrap.Config{}) - return fc, nil - } - defer func() { newXDSClient = oldClientMaker }() - - builder := resolver.Get(xdsScheme) - if builder == nil { - t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) - } - - // Create xds credentials to be passed to resolver.Build(). - creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{FallbackCreds: insecure.NewCredentials()}) - if err != nil { - t.Fatalf("xds.NewClientCredentials() failed: %v", err) - } - - // Since the fake xds client is not configured with any certificate provider - // configs, and we are specifying xds credentials in the call to - // resolver.Build(), we expect it to fail. - if _, err := builder.Build(target, newTestClientConn(), resolver.BuildOptions{DialCreds: creds}); err == nil { - t.Fatal("builder.Build() succeeded when expected to fail") - } -} - type setupOpts struct { - xdsClientFunc func() (xdsClientInterface, error) + bootstrapC *bootstrap.Config + target resolver.Target } -func testSetup(t *testing.T, opts setupOpts) (*xdsResolver, *testClientConn, func()) { +func testSetup(t *testing.T, opts setupOpts) (*xdsResolver, *fakeclient.Client, *testClientConn, func()) { t.Helper() + fc := fakeclient.NewClient() + if opts.bootstrapC != nil { + fc.SetBootstrapConfig(opts.bootstrapC) + } oldClientMaker := newXDSClient - newXDSClient = opts.xdsClientFunc + closeCh := make(chan struct{}) + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + return fc, grpcsync.OnceFunc(func() { close(closeCh) }), nil + } cancel := func() { + // Make sure the xDS client is closed, in all (successful or failed) + // cases. 
+ select { + case <-time.After(defaultTestTimeout): + t.Fatalf("timeout waiting for close") + case <-closeCh: + } newXDSClient = oldClientMaker } - builder := resolver.Get(xdsScheme) if builder == nil { t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) } tcc := newTestClientConn() - r, err := builder.Build(target, tcc, resolver.BuildOptions{}) + r, err := builder.Build(opts.target, tcc, resolver.BuildOptions{}) if err != nil { t.Fatalf("builder.Build(%v) returned err: %v", target, err) } - return r.(*xdsResolver), tcc, cancel + return r.(*xdsResolver), fc, tcc, func() { + r.Close() + cancel() + } } // waitForWatchListener waits for the WatchListener method to be called on the @@ -247,181 +322,494 @@ func waitForWatchRouteConfig(ctx context.Context, t *testing.T, xdsC *fakeclient } } -// TestXDSResolverWatchCallbackAfterClose tests the case where a service update -// from the underlying xdsClient is received after the resolver is closed. -func (s) TestXDSResolverWatchCallbackAfterClose(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, - }) - defer cancel() +// buildResolverForTarget builds an xDS resolver for the given target. It +// returns a testClientConn which allows inspection of resolver updates, and a +// function to close the resolver once the test is complete. 
+func buildResolverForTarget(t *testing.T, target resolver.Target) (*testClientConn, func()) { + builder := resolver.Get(xdsScheme) + if builder == nil { + t.Fatalf("resolver.Get(%v) returned nil", xdsScheme) + } - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - // Call the watchAPI callback after closing the resolver, and make sure no - // update is triggerred on the ClientConn. - xdsR.Close() - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, - }, + tcc := newTestClientConn() + r, err := builder.Build(target, tcc, resolver.BuildOptions{}) + if err != nil { + t.Fatalf("builder.Build(%v) returned err: %v", target, err) + } + return tcc, r.Close +} + +// TestResolverResourceName builds an xDS resolver and verifies that the +// resource name specified in the discovery request matches expectations. +func (s) TestResolverResourceName(t *testing.T) { + // Federation support is required when new style names are used. 
+ oldXDSFederation := envconfig.XDSFederation + envconfig.XDSFederation = true + defer func() { envconfig.XDSFederation = oldXDSFederation }() + + tests := []struct { + name string + listenerResourceNameTemplate string + extraAuthority string + dialTarget string + wantResourceName string + }{ + { + name: "default %s old style", + listenerResourceNameTemplate: "%s", + dialTarget: "xds:///target", + wantResourceName: "target", + }, + { + name: "old style no percent encoding", + listenerResourceNameTemplate: "/path/to/%s", + dialTarget: "xds:///target", + wantResourceName: "/path/to/target", + }, + { + name: "new style with %s", + listenerResourceNameTemplate: "xdstp://authority.com/%s", + dialTarget: "xds:///0.0.0.0:8080", + wantResourceName: "xdstp://authority.com/0.0.0.0:8080", + }, + { + name: "new style percent encoding", + listenerResourceNameTemplate: "xdstp://authority.com/%s", + dialTarget: "xds:///[::1]:8080", + wantResourceName: "xdstp://authority.com/%5B::1%5D:8080", + }, + { + name: "new style different authority", + listenerResourceNameTemplate: "xdstp://authority.com/%s", + extraAuthority: "test-authority", + dialTarget: "xds://test-authority/target", + wantResourceName: "xdstp://test-authority/envoy.config.listener.v3.Listener/target", }, - }, nil) + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup the management server to push the requested resource name + // on to a channel. No resources are configured on the management + // server as part of this test, as we are only interested in the + // resource name being requested. + resourceNameCh := make(chan string, 1) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + // When the resolver is being closed, the watch associated + // with the listener resource will be cancelled, and it + // might result in a discovery request with no resource + // names. 
Hence, we only consider requests which contain a + // resource name. + var name string + if len(req.GetResourceNames()) == 1 { + name = req.GetResourceNames()[0] + } + select { + case resourceNameCh <- name: + default: + } + return nil + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() - if gotVal, gotErr := tcc.stateCh.Receive(ctx); gotErr != context.DeadlineExceeded { - t.Fatalf("ClientConn.UpdateState called after xdsResolver is closed: %v", gotVal) + // Create a bootstrap configuration with test options. + opts := xdsbootstrap.Options{ + ServerURI: mgmtServer.Address, + ClientDefaultListenerResourceNameTemplate: tt.listenerResourceNameTemplate, + } + if tt.extraAuthority != "" { + // In this test, we really don't care about having multiple + // management servers. All we need to verify is whether the + // resource name matches expectation. + opts.Authorities = map[string]string{ + tt.extraAuthority: mgmtServer.Address, + } + } + cleanup, err := xdsbootstrap.CreateFile(opts) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + _, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL(tt.dialTarget)}) + defer rClose() + + // Verify the resource name in the discovery request being sent out. + select { + case gotResourceName := <-resourceNameCh: + if gotResourceName != tt.wantResourceName { + t.Fatalf("Received discovery request with resource name: %v, want %v", gotResourceName, tt.wantResourceName) + } + case <-time.After(defaultTestTimeout): + t.Fatalf("Timeout when waiting for discovery request") + } + }) } } -// TestXDSResolverBadServiceUpdate tests the case the xdsClient returns a bad -// service update. 
-func (s) TestXDSResolverBadServiceUpdate(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, +// TestResolverWatchCallbackAfterClose tests the case where a service update +// from the underlying xDS client is received after the resolver is closed, and +// verifies that the update is not propagated to the ClientConn. +func (s) TestResolverWatchCallbackAfterClose(t *testing.T) { + // Setup the management server that synchronizes with the test goroutine + // using two channels. The management server signals the test goroutine when + // it receives a discovery request for a route configuration resource. And + // the test goroutine signals the management server when the resolver is + // closed. + waitForRouteConfigDiscoveryReqCh := make(chan struct{}, 1) + waitForResolverCloseCh := make(chan struct{}) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + if req.GetTypeUrl() == version.V3RouteConfigURL { + select { + case waitForRouteConfigDiscoveryReqCh <- struct{}{}: + default: + } + <-waitForResolverCloseCh + } + return nil + }, }) - defer func() { - cancel() - xdsR.Close() - }() + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + // Configure listener and route configuration resources on the management + // server. 
+ const serviceName = "my-service-client-side-xds" + rdsName := "route-" + serviceName + cdsName := "cluster-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, serviceName, cdsName)}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - // Invoke the watchAPI callback with a bad service update and wait for the - // ReportError method to be called on the ClientConn. - suErr := errors.New("bad serviceupdate") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() - if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr { - t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) + // Wait for a discovery request for a route configuration resource. + select { + case <-waitForRouteConfigDiscoveryReqCh: + case <-ctx.Done(): + t.Fatal("Timeout when waiting for a discovery request for a route configuration resource") + } + + // Close the resolver and unblock the management server. + rClose() + close(waitForResolverCloseCh) + + // Verify that the update from the management server is not propagated to + // the ClientConn. The xDS resolver, once closed, is expected to drop + // updates from the xDS client. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := tcc.stateCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatalf("ClientConn received an update from the resolver that was closed: %v", err) } } -// TestXDSResolverGoodServiceUpdate tests the happy case where the resolver -// gets a good service update from the xdsClient. -func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, - }) +// TestResolverCloseClosesXDSClient tests that the xDS resolver's Close method +// closes the xDS client. +func (s) TestResolverCloseClosesXDSClient(t *testing.T) { + bootstrapCfg := &bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, "dummy-management-server-address"), + } + + // Override xDS client creation to use bootstrap configuration pointing to a + // dummy management server. Also close a channel when the returned xDS + // client is closed. + closeCh := make(chan struct{}) + origNewClient := newXDSClient + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + c, cancel, err := xdsclient.NewWithConfigForTesting(bootstrapCfg, defaultTestTimeout, defaultTestTimeout) + return c, func() { + close(closeCh) + cancel() + }, err + } defer func() { - cancel() - xdsR.Close() + newXDSClient = origNewClient }() + _, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///my-service-client-side-xds")}) + rClose() + + select { + case <-closeCh: + case <-time.After(defaultTestTimeout): + t.Fatal("Timeout when waiting for xDS client to be closed") + } +} + +// TestResolverBadServiceUpdate tests the case where a resource returned by the +// management server is NACKed by the xDS client, which then returns an update +// containing an error to the resolver. 
Verifies that the update is propagated +// to the ClientConn by the resolver. It also tests the cases where the resolver +// gets a good update subsequently, and another error after the good update. +func (s) TestResolverBadServiceUpdate(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + // Configure a listener resource that is expected to be NACKed because it + // does not contain the `RouteSpecifier` field in the HTTPConnectionManager. 
+ hcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("router", &v3routerpb.Router{})}, + }) + lis := &v3listenerpb.Listener{ + Name: serviceName, + ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, + FilterChains: []*v3listenerpb.FilterChain{{ + Name: "filter-chain-name", + Filters: []*v3listenerpb.Filter{{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, + }}, + }}, + } + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{lis}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - defer replaceRandNumGenerator(0)() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + wantErr := "no RouteSpecifier" + val, err := tcc.errorCh.Receive(ctx) + if err != nil { + t.Fatal("Timeout when waiting for error to be propagated to the ClientConn") + } + gotErr := val.(error) + if gotErr == nil || !strings.Contains(gotErr.Error(), wantErr) { + t.Fatalf("Received error from resolver %q, want %q", gotErr, wantErr) + } + + // Configure good listener and route configuration resources on the + // management server. + rdsName := "route-" + serviceName + cdsName := "cluster-" + serviceName + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(serviceName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, serviceName, cdsName)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Expect a good update from the resolver. 
+ val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState := val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + + // Configure another bad resource on the management server. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{lis}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Expect an error update from the resolver. + val, err = tcc.errorCh.Receive(ctx) + if err != nil { + t.Fatal("Timeout when waiting for error to be propagated to the ClientConn") + } + gotErr = val.(error) + if gotErr == nil || !strings.Contains(gotErr.Error(), wantErr) { + t.Fatalf("Received error from resolver %q, want %q", gotErr, wantErr) + } +} + +// TestResolverGoodServiceUpdate tests the case where the resource returned by +// the management server is ACKed by the xDS client, which then returns a good +// service update to the resolver. The test verifies that the service config +// returned by the resolver matches expectations, and that the config selector +// returned by the resolver picks clusters based on the route configuration +// received from the management server. +func (s) TestResolverGoodServiceUpdate(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + // Create a bootstrap configuration specifying the above management server. 
+ nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + ldsName := serviceName + rdsName := "route-" + serviceName for _, tt := range []struct { - routes []*xdsclient.Route - wantJSON string - wantClusters map[string]bool + routeConfig *v3routepb.RouteConfiguration + wantServiceConfig string + wantClusters map[string]bool }{ { - routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, - wantJSON: `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] + // A route configuration with a single cluster. 
+ routeConfig: e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: ldsName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeCluster, + ClusterName: "test-cluster-1", + }), + wantServiceConfig: ` +{ + "loadBalancingConfig": [{ + "xds_cluster_manager_experimental": { + "children": { + "cluster:test-cluster-1": { + "childPolicy": [{ + "cds_experimental": { + "cluster": "test-cluster-1" + } + }] } } - }}]}`, - wantClusters: map[string]bool{"test-cluster-1": true}, + } + }] +}`, + wantClusters: map[string]bool{"cluster:test-cluster-1": true}, }, { - routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ - "cluster_1": {Weight: 75}, - "cluster_2": {Weight: 25}, - }}}, - // This update contains the cluster from the previous update as - // well as this update, as the previous config selector still - // references the old cluster when the new one is pushed. - wantJSON: `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - }, - "cluster_1":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] + // A route configuration with a two new clusters. + routeConfig: e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: ldsName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeWeightedCluster, + WeightedClusters: map[string]int{"cluster_1": 75, "cluster_2": 25}, + }), + // This update contains the cluster from the previous update as well + // as this update, as the previous config selector still references + // the old cluster when the new one is pushed. 
+ wantServiceConfig: ` +{ + "loadBalancingConfig": [{ + "xds_cluster_manager_experimental": { + "children": { + "cluster:test-cluster-1": { + "childPolicy": [{ + "cds_experimental": { + "cluster": "test-cluster-1" + } + }] }, - "cluster_2":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] - } - } - }}]}`, - wantClusters: map[string]bool{"cluster_1": true, "cluster_2": true}, - }, - { - routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ - "cluster_1": {Weight: 75}, - "cluster_2": {Weight: 25}, - }}}, - // With this redundant update, the old config selector has been - // stopped, so there are no more references to the first cluster. - // Only the second update's clusters should remain. - wantJSON: `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "cluster_1":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_1"}}] + "cluster:cluster_1": { + "childPolicy": [{ + "cds_experimental": { + "cluster": "cluster_1" + } + }] }, - "cluster_2":{ - "childPolicy":[{"cds_experimental":{"cluster":"cluster_2"}}] + "cluster:cluster_2": { + "childPolicy": [{ + "cds_experimental": { + "cluster": "cluster_2" + } + }] } } - }}]}`, - wantClusters: map[string]bool{"cluster_1": true, "cluster_2": true}, + } + }] +}`, + wantClusters: map[string]bool{"cluster:cluster_1": true, "cluster:cluster_2": true}, }, } { - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: tt.routes, - }, - }, - }, nil) - + // Configure the management server with a good listener resource and a + // route configuration resource, as specified by the test case. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{tt.routeConfig}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - gotState, err := tcc.stateCh.Receive(ctx) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Read the update pushed by the resolver to the ClientConn. + val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState := val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(tt.wantJSON) + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(tt.wantServiceConfig) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Error("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Error("received nil config selector") - continue + t.Fatal("Received nil config selector in update from resolver") } pickedClusters := make(map[string]bool) @@ -429,604 +817,928 @@ func (s) TestXDSResolverGoodServiceUpdate(t *testing.T) { // with the random number generator stubbed out, we can 
rely on this // to be 100% reproducible. for i := 0; i < 100; i++ { - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } cluster := clustermanager.GetPickedClusterForTesting(res.Context) pickedClusters[cluster] = true res.OnCommitted() } - if !reflect.DeepEqual(pickedClusters, tt.wantClusters) { + if !cmp.Equal(pickedClusters, tt.wantClusters) { t.Errorf("Picked clusters: %v; want: %v", pickedClusters, tt.wantClusters) } } } -// TestXDSResolverRemovedWithRPCs tests the case where a config selector sends -// an empty update to the resolver after the resource is removed. -func (s) TestXDSResolverRemovedWithRPCs(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, +// TestResolverRequestHash tests a case where a resolver receives a RouteConfig update +// with a HashPolicy specifying to generate a hash. The configSelector generated should +// successfully generate a Hash. +func (s) TestResolverRequestHash(t *testing.T) { + oldRH := envconfig.XDSRingHash + envconfig.XDSRingHash = true + defer func() { envconfig.XDSRingHash = oldRH }() + + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. 
+ nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + ldsName := serviceName + rdsName := "route-" + serviceName + // Configure the management server with a good listener resource and a + // route configuration resource that specifies a hash policy. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{{ + Name: rdsName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsName}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "test-cluster-1", + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }, + }}, + HashPolicy: []*v3routepb.RouteAction_HashPolicy{{ + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ + Header: &v3routepb.RouteAction_HashPolicy_Header{ + HeaderName: ":path", + }, + }, + Terminal: true, + }}, + }}, + }}, + }}, + }}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - defer xdsR.Close() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + // Read the update pushed by the resolver to the ClientConn. 
+ val, err := tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState := val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + cs := iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("Received nil config selector in update from resolver") + } + + // Selecting a config when there was a hash policy specified in the route + // that will be selected should put a request hash in the config's context. + res, err := cs.SelectConfig(iresolver.RPCInfo{ + Context: metadata.NewOutgoingContext(ctx, metadata.Pairs(":path", "/products")), + Method: "/service/method", + }) + if err != nil { + t.Fatalf("cs.SelectConfig(): %v", err) + } + gotHash := ringhash.GetRequestHashForTesting(res.Context) + wantHash := xxhash.Sum64String("/products") + if gotHash != wantHash { + t.Fatalf("Got request hash: %v, want: %v", gotHash, wantHash) + } +} + +// TestResolverRemovedWithRPCs tests the case where resources are removed from +// the management server, causing it to send an empty update to the xDS client, +// which returns a resource-not-found error to the xDS resolver. The test +// verifies that an ongoing RPC is handled properly when this happens. +func (s) TestResolverRemovedWithRPCs(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. 
+ nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + ldsName := serviceName + rdsName := "route-" + serviceName + // Configure the management server with a good listener and route + // configuration resource. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, "test-cluster-1")}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, - }, - }, - }, nil) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - gotState, err := tcc.stateCh.Receive(ctx) + // Read the update pushed by the resolver to the ClientConn. 
+ val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState := val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster:test-cluster-1": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": "test-cluster-1" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } - // "Make an RPC" by invoking the config selector. cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatalf("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } - - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } - // Delete the resource - suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) + // Delete the resources on the management server. This should result in a + // resource-not-found error from the xDS client. 
+ if err := mgmtServer.Update(ctx, e2e.UpdateOptions{NodeID: nodeID}); err != nil { + t.Fatal(err) + } - if _, err = tcc.stateCh.Receive(ctx); err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + // The RPC started earlier is still in progress. So, the xDS resolver will + // not produce an empty service config at this point. Instead it will retain + // the cluster to which the RPC is ongoing in the service config, but will + // return an erroring config selector which will fail new RPCs. + val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState = val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) + } + cs = iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("Received nil config selector in update from resolver") + } + _, err = cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) + if err == nil || status.Code(err) != codes.Unavailable { + t.Fatalf("cs.SelectConfig() returned: %v, want: %v", err, codes.Unavailable) } // "Finish the RPC"; this could cause a panic if the resolver doesn't // handle it correctly. res.OnCommitted() + + // Now that the RPC is committed, the xDS resolver is expected to send an + // update with an empty service config. 
+ val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) + } + rState = val.(resolver.State) + if err := rState.ServiceConfig.Err; err != nil { + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) + } + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(`{}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) + } } -// TestXDSResolverRemovedResource tests for proper behavior after a resource is -// removed. -func (s) TestXDSResolverRemovedResource(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, +// TestResolverRemovedResource tests the case where resources returned by the +// management server are removed. The test verifies that the resolver pushes the +// expected config selector and service config in this case. +func (s) TestResolverRemovedResource(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, }) - defer cancel() - defer xdsR.Close() + if err != nil { + t.Fatal(err) + } + defer cleanup() + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + // Configure the management server with a good listener and route + // configuration resource. 
+ ldsName := serviceName + rdsName := "route-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, "test-cluster-1")}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, - }, - }, - }, nil) - wantJSON := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - } - } - }}]}` - wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - gotState, err := tcc.stateCh.Receive(ctx) + // Read the update pushed by the resolver to the ClientConn. 
+ val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState := val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster:test-cluster-1": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": "test-cluster-1" + } + } + ] + } + } + } + } + ] +}`) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Error("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } // "Make an RPC" by invoking the config selector. cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatalf("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } // "Finish the RPC"; this could cause a panic if the resolver doesn't // handle it correctly. res.OnCommitted() - // Delete the resource. 
The channel should receive a service config with the - // original cluster but with an erroring config selector. - suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) + // Delete the resources on the management server, resulting in a + // resource-not-found error from the xDS client. + if err := mgmtServer.Update(ctx, e2e.UpdateOptions{NodeID: nodeID}); err != nil { + t.Fatal(err) + } - if gotState, err = tcc.stateCh.Receive(ctx); err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + // The channel should receive the existing service config with the original + // cluster but with an erroring config selector. + val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState = gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Error("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } // "Make another RPC" by invoking the config selector. 
cs = iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatalf("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } - res, err = cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + res, err = cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err == nil || status.Code(err) != codes.Unavailable { - t.Fatalf("Expected UNAVAILABLE error from cs.SelectConfig(_); got %v, %v", res, err) + t.Fatalf("cs.SelectConfig() got %v, %v, expected UNAVAILABLE error", res, err) } // In the meantime, an empty ServiceConfig update should have been sent. - if gotState, err = tcc.stateCh.Receive(ctx); err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + val, err = tcc.stateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState = gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - wantSCParsed = internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)("{}") + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)("{}") if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Error("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } } -func (s) TestXDSResolverWRR(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return 
xdsC, nil }, - }) - defer func() { - cancel() - xdsR.Close() - }() +// TestResolverWRR tests the case where the route configuration returned by the +// management server contains a set of weighted clusters. The test performs a +// bunch of RPCs using the cluster specifier returned by the resolver, and +// verifies the cluster distribution. +func (s) TestResolverWRR(t *testing.T) { + defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) + newWRR = testutils.NewTestWRR + + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + ldsName := serviceName + rdsName := "route-" + serviceName + // Configure the management server with a good listener resource and a + // route configuration resource. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.RouteConfigResourceWithOptions(e2e.RouteConfigOptions{ + RouteConfigName: rdsName, + ListenerName: ldsName, + ClusterSpecifierType: e2e.RouteConfigClusterSpecifierTypeWeightedCluster, + WeightedClusters: map[string]int{"A": 75, "B": 25}, + })}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) - newWRR = xdstestutils.NewTestWRR - - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{ - "A": {Weight: 5}, - "B": {Weight: 10}, - }}}, - }, - }, - }, nil) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + // Read the update pushed by the resolver to the ClientConn. 
gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatal("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } + // Make RPCs are verify WRR behavior in the cluster specifier. picks := map[string]int{} - for i := 0; i < 30; i++ { - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + for i := 0; i < 100; i++ { + res, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } picks[clustermanager.GetPickedClusterForTesting(res.Context)]++ res.OnCommitted() } - want := map[string]int{"A": 10, "B": 20} - if !reflect.DeepEqual(picks, want) { - t.Errorf("picked clusters = %v; want %v", picks, want) + want := map[string]int{"cluster:A": 75, "cluster:B": 25} + if !cmp.Equal(picks, want) { + t.Errorf("Picked clusters: %v; want: %v", picks, want) } } -func (s) TestXDSResolverMaxStreamDuration(t *testing.T) { - defer func(old bool) { env.TimeoutSupport = old }(env.TimeoutSupport) - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, - }) - defer func() { - cancel() - xdsR.Close() - }() - - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - 
xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, MaxStreamDuration: time.Second, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) +// TestResolverMaxStreamDuration tests the case where the resolver receives max +// stream duration as part of the listener and route configuration resources. +// The test verifies that the RPC timeout returned by the config selector +// matches expectations. A non-nil max stream duration (this includes an +// explicit zero value) in a matching route overrides the value specified in the +// listener resource. +func (s) TestResolverMaxStreamDuration(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() - defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) - newWRR = xdstestutils.NewTestWRR - - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*client.Route{{ - Prefix: newStringP("/foo"), - WeightedClusters: map[string]xdsclient.WeightedCluster{"A": {Weight: 1}}, - MaxStreamDuration: newDurationP(5 * time.Second), - }, { - Prefix: newStringP("/bar"), - WeightedClusters: map[string]xdsclient.WeightedCluster{"B": {Weight: 1}}, - MaxStreamDuration: newDurationP(0), - }, { - Prefix: newStringP(""), - WeightedClusters: map[string]xdsclient.WeightedCluster{"C": {Weight: 1}}, - }}, + // Create a bootstrap configuration specifying the above management server. 
+ nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + + // Configure the management server with a listener resource that specifies a + // max stream duration as part of its HTTP connection manager. Also + // configure a route configuration resource, which has multiple routes with + // different values of max stream duration. + ldsName := serviceName + rdsName := "route-" + serviceName + hcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, }, + RouteConfigName: rdsName, + }}, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(1 * time.Second), }, - }, nil) + }) + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{{ + Name: ldsName, + ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, + FilterChains: []*v3listenerpb.FilterChain{{ + Name: "filter-chain-name", + Filters: []*v3listenerpb.Filter{{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, + }}, + }}, + }}, + Routes: []*v3routepb.RouteConfiguration{{ + Name: rdsName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsName}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/foo"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: 
&v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "A", + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }}, + }, + MaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{ + MaxStreamDuration: durationpb.New(5 * time.Second), + }, + }}, + }, + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/bar"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "B", + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }}, + }, + MaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{ + MaxStreamDuration: durationpb.New(0 * time.Second), + }, + }}, + }, + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + { + Name: "C", + Weight: &wrapperspb.UInt32Value{Value: 100}, + }, + }}, + }, + }}, + }, + }, + }}, + }}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + // Read the update pushed by the resolver to the ClientConn. 
gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatal("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } testCases := []struct { - name string - method string - timeoutSupport bool - want *time.Duration + name string + method string + want *time.Duration }{{ - name: "RDS setting", - method: "/foo/method", - timeoutSupport: true, - want: newDurationP(5 * time.Second), - }, { - name: "timeout support disabled", - method: "/foo/method", - timeoutSupport: false, - want: nil, + name: "RDS setting", + method: "/foo/method", + want: newDurationP(5 * time.Second), }, { - name: "explicit zero in RDS; ignore LDS", - method: "/bar/method", - timeoutSupport: true, - want: nil, + name: "explicit zero in RDS; ignore LDS", + method: "/bar/method", + want: nil, }, { - name: "no config in RDS; fallback to LDS", - method: "/baz/method", - timeoutSupport: true, - want: newDurationP(time.Second), + name: "no config in RDS; fallback to LDS", + method: "/baz/method", + want: newDurationP(time.Second), }} for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - env.TimeoutSupport = tc.timeoutSupport req := iresolver.RPCInfo{ Method: tc.method, - Context: context.Background(), + Context: ctx, } res, err := cs.SelectConfig(req) if err != nil { - t.Errorf("Unexpected error from cs.SelectConfig(%v): %v", req, err) + t.Errorf("cs.SelectConfig(%v): %v", req, err) return } res.OnCommitted() got := res.MethodConfig.Timeout - if !reflect.DeepEqual(got, tc.want) { + if 
!cmp.Equal(got, tc.want) { t.Errorf("For method %q: res.MethodConfig.Timeout = %v; want %v", tc.method, got, tc.want) } }) } } -// TestXDSResolverDelayedOnCommitted tests that clusters remain in service +// TestResolverDelayedOnCommitted tests that clusters remain in service // config if RPCs are in flight. -func (s) TestXDSResolverDelayedOnCommitted(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, +func (s) TestResolverDelayedOnCommitted(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() + + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, }) - defer func() { - cancel() - xdsR.Close() - }() + if err != nil { + t.Fatal(err) + } + defer cleanup() + + const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() + // Configure the management server with a good listener and route + // configuration resource. 
+ ldsName := serviceName + rdsName := "route-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, "old-cluster")}, + SkipValidation: true, + } ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"test-cluster-1": {Weight: 1}}}}, - }, - }, - }, nil) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } - gotState, err := tcc.stateCh.Receive(ctx) + // Read the update pushed by the resolver to the ClientConn. 
+ val, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState := val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - - wantJSON := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - } - } - }}]}` - wantSCParsed := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON) + wantSCParsed := internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster:old-cluster": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": "old-cluster" + } + } + ] + } + } + } + } + ] +}`) if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed.Config)) + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } + // Make an RPC, but do not commit it yet. 
cs := iresolver.GetConfigSelector(rState) if cs == nil { - t.Fatal("received nil config selector") + t.Fatal("Received nil config selector in update from resolver") } - - res, err := cs.SelectConfig(iresolver.RPCInfo{Context: context.Background()}) + resOld, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) if err != nil { - t.Fatalf("Unexpected error from cs.SelectConfig(_): %v", err) + t.Fatalf("cs.SelectConfig(): %v", err) } - cluster := clustermanager.GetPickedClusterForTesting(res.Context) - if cluster != "test-cluster-1" { - t.Fatalf("") + if cluster := clustermanager.GetPickedClusterForTesting(resOld.Context); cluster != "cluster:old-cluster" { + t.Fatalf("Picked cluster is %q, want %q", cluster, "cluster:old-cluster") } - // delay res.OnCommitted() - // Perform TWO updates to ensure the old config selector does not hold a - // reference to test-cluster-1. - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, - }, - }, - }, nil) - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, - }, - }, - }, nil) + // Delay resOld.OnCommitted(). As long as there are pending RPCs to removed + // clusters, they still appear in the service config. - tcc.stateCh.Receive(ctx) // Ignore the first update - gotState, err = tcc.stateCh.Receive(ctx) + // Update the route configuration resource on the management server to + // return a new cluster. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, "new-cluster")}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Read the update pushed by the resolver to the ClientConn and ensure the + // old cluster is present in the service config. Also ensure that the newly + // returned config selector does not hold a reference to the old cluster. + val, err = tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState = gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - wantJSON2 := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "test-cluster-1":{ - "childPolicy":[{"cds_experimental":{"cluster":"test-cluster-1"}}] - }, - "NEW":{ - "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}] - } - } - }}]}` - wantSCParsed2 := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON2) - if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed2.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed2.Config)) + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster:old-cluster": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": 
"old-cluster" + } + } + ] + }, + "cluster:new-cluster": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": "new-cluster" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s\nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } - // Invoke OnCommitted; should lead to a service config update that deletes - // test-cluster-1. - res.OnCommitted() - - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{"NEW": {Weight: 1}}}}, - }, - }, - }, nil) - gotState, err = tcc.stateCh.Receive(ctx) - if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + cs = iresolver.GetConfigSelector(rState) + if cs == nil { + t.Fatal("Received nil config selector in update from resolver") } - rState = gotState.(resolver.State) - if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + resNew, err := cs.SelectConfig(iresolver.RPCInfo{Context: ctx, Method: "/service/method"}) + if err != nil { + t.Fatalf("cs.SelectConfig(): %v", err) } - wantJSON3 := `{"loadBalancingConfig":[{ - "xds_cluster_manager_experimental":{ - "children":{ - "NEW":{ - "childPolicy":[{"cds_experimental":{"cluster":"NEW"}}] - } - } - }}]}` - wantSCParsed3 := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)(wantJSON3) - if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed3.Config) { - t.Errorf("ClientConn.UpdateState received different service config") - t.Error("got: ", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Fatal("want: ", cmp.Diff(nil, wantSCParsed3.Config)) + if 
cluster := clustermanager.GetPickedClusterForTesting(resNew.Context); cluster != "cluster:new-cluster" { + t.Fatalf("Picked cluster is %q, want %q", cluster, "cluster:new-cluster") } -} -// TestXDSResolverUpdates tests the cases where the resolver gets a good update -// after an error, and an error after the good update. -func (s) TestXDSResolverGoodUpdateAfterError(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, - }) - defer func() { - cancel() - xdsR.Close() - }() + // Invoke OnCommitted on the old RPC; should lead to a service config update + // that deletes the old cluster, as the old cluster no longer has any + // pending RPCs. + resOld.OnCommitted() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) - - // Invoke the watchAPI callback with a bad service update and wait for the - // ReportError method to be called on the ClientConn. - suErr := errors.New("bad serviceupdate") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) - - if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr { - t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr) - } - - // Invoke the watchAPI callback with a good service update and wait for the - // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ - { - Domains: []string{targetStr}, - Routes: []*client.Route{{Prefix: newStringP(""), WeightedClusters: map[string]xdsclient.WeightedCluster{cluster: {Weight: 1}}}}, - }, - }, - }, nil) - gotState, err := tcc.stateCh.Receive(ctx) + val, err = tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Timeout waiting for an update from the resolver: %v", err) } - rState := gotState.(resolver.State) + rState = val.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + t.Fatalf("Received error in service config: %v", rState.ServiceConfig.Err) } - - // Invoke the watchAPI callback with a bad service update and wait for the - // ReportError method to be called on the ClientConn. - suErr2 := errors.New("bad serviceupdate 2") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr2) - if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != nil || gotErrVal != suErr2 { - t.Fatalf("ClientConn.ReportError() received %v, want %v", gotErrVal, suErr2) + wantSCParsed = internal.ParseServiceConfig.(func(string) *serviceconfig.ParseResult)(` +{ + "loadBalancingConfig": [ + { + "xds_cluster_manager_experimental": { + "children": { + "cluster:new-cluster": { + "childPolicy": [ + { + "cds_experimental": { + "cluster": "new-cluster" + } + } + ] + } + } + } + } + ] +}`) + if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantSCParsed.Config) { + t.Fatalf("Got service config:\n%s \nWant service config:\n%s", cmp.Diff(nil, rState.ServiceConfig.Config), cmp.Diff(nil, wantSCParsed.Config)) } } -// TestXDSResolverResourceNotFoundError tests the cases where the resolver gets -// a ResourceNotFoundError. 
It should generate a service config picking -// weighted_target, but no child balancers. -func (s) TestXDSResolverResourceNotFoundError(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, - }) - defer func() { - cancel() - xdsR.Close() - }() +// TestResolverMultipleLDSUpdates tests the case where two LDS updates with the +// same RDS name to watch are received without an RDS in between. Those LDS +// updates shouldn't trigger a service config update. +func (s) TestResolverMultipleLDSUpdates(t *testing.T) { + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatal(err) + } + defer mgmtServer.Stop() - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{RouteConfigName: routeStr, HTTPFilters: routerFilterList}, nil) - waitForWatchRouteConfig(ctx, t, xdsC, routeStr) + // Create a bootstrap configuration specifying the above management server. + nodeID := uuid.New().String() + cleanup, err := xdsbootstrap.CreateFile(xdsbootstrap.Options{ + NodeID: nodeID, + ServerURI: mgmtServer.Address, + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() - // Invoke the watchAPI callback with a bad service update and wait for the - // ReportError method to be called on the ClientConn. - suErr := xdsclient.NewErrorf(xdsclient.ErrorTypeResourceNotFound, "resource removed error") - xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{}, suErr) + // Build an xDS resolver that uses the above bootstrap configuration + // Creating the xDS resolver should result in creation of the xDS client. 
+ const serviceName = "my-service-client-side-xds" + tcc, rClose := buildResolverForTarget(t, resolver.Target{URL: *testutils.MustParseURL("xds:///" + serviceName)}) + defer rClose() - if gotErrVal, gotErr := tcc.errorCh.Receive(ctx); gotErr != context.DeadlineExceeded { - t.Fatalf("ClientConn.ReportError() received %v, %v, want channel recv timeout", gotErrVal, gotErr) + // Configure the management server with a listener resource, but no route + // configuration resource. + ldsName := serviceName + rdsName := "route-" + serviceName + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + SkipValidation: true, } - - ctx, cancel = context.WithTimeout(context.Background(), defaultTestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - gotState, err := tcc.stateCh.Receive(ctx) - if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) + } + + // Ensure there is no update from the resolver. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + gotState, err := tcc.stateCh.Receive(sCtx) + if err == nil { + t.Fatalf("Received update from resolver %v when none expected", gotState) + } + + // Configure the management server with a listener resource that points to + // the same route configuration resource but has different values for some + // other fields. There is still no route configuration resource on the + // management server. 
+ hcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: rdsName, + }}, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(1 * time.Second), + }, + }) + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{{ + Name: ldsName, + ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, + FilterChains: []*v3listenerpb.FilterChain{{ + Name: "filter-chain-name", + Filters: []*v3listenerpb.Filter{{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, + }}, + }}, + }}, + SkipValidation: true, } - rState := gotState.(resolver.State) - wantParsedConfig := internal.ParseServiceConfigForTesting.(func(string) *serviceconfig.ParseResult)("{}") - if !internal.EqualServiceConfigForTesting(rState.ServiceConfig.Config, wantParsedConfig.Config) { - t.Error("ClientConn.UpdateState got wrong service config") - t.Errorf("gotParsed: %s", cmp.Diff(nil, rState.ServiceConfig.Config)) - t.Errorf("wantParsed: %s", cmp.Diff(nil, wantParsedConfig.Config)) + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatal(err) } - if err := rState.ServiceConfig.Err; err != nil { - t.Fatalf("ClientConn.UpdateState received error in service config: %v", rState.ServiceConfig.Err) + + // Ensure that there is no update from the resolver. 
+ sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + gotState, err = tcc.stateCh.Receive(sCtx) + if err == nil { + t.Fatalf("Received update from resolver %v when none expected", gotState) } } @@ -1089,7 +1801,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { var path []string testCases := []struct { name string - ldsFilters []xdsclient.HTTPFilter + ldsFilters []xdsresource.HTTPFilter vhOverrides map[string]httpfilter.FilterConfig rtOverrides map[string]httpfilter.FilterConfig clOverrides map[string]httpfilter.FilterConfig @@ -1099,7 +1811,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }{ { name: "no router filter", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1"}}, }, rpcRes: map[string][][]string{ @@ -1111,7 +1823,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }, { name: "ignored after router filter", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1"}}, routerFilter, {Name: "foo2", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo2"}}, @@ -1129,7 +1841,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }, { name: "NewStream error; ensure earlier interceptor Done is still called", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1"}}, {Name: "bar", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "bar1", newStreamErr: errors.New("bar newstream err")}}, routerFilter, @@ -1146,7 +1858,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { }, { name: "all overrides", - ldsFilters: []xdsclient.HTTPFilter{ + ldsFilters: []xdsresource.HTTPFilter{ {Name: "foo", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "foo1", newStreamErr: errors.New("this is 
overridden to nil")}}, {Name: "bar", Filter: &filterBuilder{path: &path}, Config: filterCfg{s: "bar1"}}, routerFilter, @@ -1171,20 +1883,15 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { for i, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - xdsC := fakeclient.NewClient() - xdsR, tcc, cancel := testSetup(t, setupOpts{ - xdsClientFunc: func() (xdsClientInterface, error) { return xdsC, nil }, - }) - defer func() { - cancel() - xdsR.Close() - }() + xdsR, xdsC, tcc, cancel := testSetup(t, setupOpts{target: target}) + defer xdsR.Close() + defer cancel() ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() waitForWatchListener(ctx, t, xdsC, targetStr) - xdsC.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: routeStr, HTTPFilters: tc.ldsFilters, }, nil) @@ -1193,21 +1900,21 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { } defer func(oldNewWRR func() wrr.WRR) { newWRR = oldNewWRR }(newWRR) - newWRR = xdstestutils.NewTestWRR + newWRR = testutils.NewTestWRR // Invoke the watchAPI callback with a good service update and wait for the // UpdateState method to be called on the ClientConn. 
- xdsC.InvokeWatchRouteConfigCallback(xdsclient.RouteConfigUpdate{ - VirtualHosts: []*xdsclient.VirtualHost{ + xdsC.InvokeWatchRouteConfigCallback("", xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ { Domains: []string{targetStr}, - Routes: []*client.Route{{ - Prefix: newStringP("1"), WeightedClusters: map[string]xdsclient.WeightedCluster{ + Routes: []*xdsresource.Route{{ + Prefix: newStringP("1"), WeightedClusters: map[string]xdsresource.WeightedCluster{ "A": {Weight: 1}, "B": {Weight: 1}, }, }, { - Prefix: newStringP("2"), WeightedClusters: map[string]xdsclient.WeightedCluster{ + Prefix: newStringP("2"), WeightedClusters: map[string]xdsresource.WeightedCluster{ "A": {Weight: 1}, "B": {Weight: 1, HTTPFilterConfigOverride: tc.clOverrides}, }, @@ -1220,7 +1927,7 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { gotState, err := tcc.stateCh.Receive(ctx) if err != nil { - t.Fatalf("ClientConn.UpdateState returned error: %v", err) + t.Fatalf("Error waiting for UpdateState to be called: %v", err) } rState := gotState.(resolver.State) if err := rState.ServiceConfig.Err; err != nil { @@ -1293,18 +2000,6 @@ func (s) TestXDSResolverHTTPFilters(t *testing.T) { } } -func replaceRandNumGenerator(start int64) func() { - nextInt := start - grpcrandInt63n = func(int64) (ret int64) { - ret = nextInt - nextInt++ - return - } - return func() { - grpcrandInt63n = grpcrand.Int63n - } -} - func newDurationP(d time.Duration) *time.Duration { return &d } diff --git a/xds/internal/server/conn_wrapper.go b/xds/internal/server/conn_wrapper.go index 359674417dcf..ec6da32fad18 100644 --- a/xds/internal/server/conn_wrapper.go +++ b/xds/internal/server/conn_wrapper.go @@ -27,23 +27,25 @@ import ( "google.golang.org/grpc/credentials/tls/certprovider" xdsinternal "google.golang.org/grpc/internal/credentials/xds" - xdsclient "google.golang.org/grpc/xds/internal/client" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // connWrapper is a thin wrapper 
around a net.Conn returned by Accept(). It // provides the following additional functionality: -// 1. A way to retrieve the configured deadline. This is required by the -// ServerHandshake() method of the xdsCredentials when it attempts to read -// key material from the certificate providers. -// 2. Implements the XDSHandshakeInfo() method used by the xdsCredentials to -// retrieve the configured certificate providers. -// 3. xDS filter_chain matching logic to select appropriate security -// configuration for the incoming connection. +// 1. A way to retrieve the configured deadline. This is required by the +// ServerHandshake() method of the xdsCredentials when it attempts to read +// key material from the certificate providers. +// 2. Implements the XDSHandshakeInfo() method used by the xdsCredentials to +// retrieve the configured certificate providers. +// 3. xDS filter_chain matching logic to select appropriate security +// configuration for the incoming connection. type connWrapper struct { net.Conn + // The specific filter chain picked for handling this connection. + filterChain *xdsresource.FilterChain + // A reference fo the listenerWrapper on which this connection was accepted. - // Used to access the filter chains during the server-side handshake. parent *listenerWrapper // The certificate providers created for this connection. @@ -56,6 +58,15 @@ type connWrapper struct { // completing the HTTP2 handshake. deadlineMu sync.Mutex deadline time.Time + + // The virtual hosts with matchable routes and instantiated HTTP Filters per + // route. + virtualHosts []xdsresource.VirtualHostWithInterceptors +} + +// VirtualHosts returns the virtual hosts to be used for server side routing. 
+func (c *connWrapper) VirtualHosts() []xdsresource.VirtualHostWithInterceptors { + return c.virtualHosts } // SetDeadline makes a copy of the passed in deadline and forwards the call to @@ -90,12 +101,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { return nil, errors.New("user has not configured xDS credentials") } - fc := c.getMatchingFilterChain() - if fc == nil { - return nil, errors.New("no matching filter chain for incoming connection") - } - - if fc.SecurityCfg == nil { + if c.filterChain.SecurityCfg == nil { // If the security config is empty, this means that the control plane // did not provide any security configuration and therefore we should // return an empty HandshakeInfo here so that the xdsCreds can use the @@ -105,14 +111,15 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs // Identity provider name is mandatory on the server-side, and this is - // enforced when the resource is received at the xdsClient layer. - ip, err := buildProviderFunc(cpc, fc.SecurityCfg.IdentityInstanceName, fc.SecurityCfg.IdentityCertName, true, false) + // enforced when the resource is received at the XDSClient layer. + secCfg := c.filterChain.SecurityCfg + ip, err := buildProviderFunc(cpc, secCfg.IdentityInstanceName, secCfg.IdentityCertName, true, false) if err != nil { return nil, err } // Root provider name is optional and required only when doing mTLS. 
var rp certprovider.Provider - if instance, cert := fc.SecurityCfg.RootInstanceName, fc.SecurityCfg.RootCertName; instance != "" { + if instance, cert := secCfg.RootInstanceName, secCfg.RootCertName; instance != "" { rp, err = buildProviderFunc(cpc, instance, cert, false, true) if err != nil { return nil, err @@ -121,29 +128,12 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { c.identityProvider = ip c.rootProvider = rp - xdsHI := xdsinternal.NewHandshakeInfo(c.identityProvider, c.rootProvider) - xdsHI.SetRequireClientCert(fc.SecurityCfg.RequireClientCert) + xdsHI := xdsinternal.NewHandshakeInfo(c.rootProvider, c.identityProvider) + xdsHI.SetRequireClientCert(secCfg.RequireClientCert) return xdsHI, nil } -// The logic specified in the documentation around the xDS FilterChainMatch -// proto mentions 8 criteria to match on. gRPC does not support 4 of those, and -// hence we got rid of them at the time of parsing the received Listener -// resource. Here we use the remaining 4 criteria to find a matching filter -// chain: Destination IP address, Source type, Source IP address, Source port. -func (c *connWrapper) getMatchingFilterChain() *xdsclient.FilterChain { - c.parent.mu.RLock() - defer c.parent.mu.RUnlock() - - // TODO: Do the filter chain match here and return the best match. - // For now, we simply return the first filter_chain in the list or the - // default one. - if len(c.parent.filterChains) == 0 { - return c.parent.defaultFilterChain - } - return c.parent.filterChains[0] -} - +// Close closes the providers and the underlying connection. 
func (c *connWrapper) Close() error { if c.identityProvider != nil { c.identityProvider.Close() diff --git a/xds/internal/server/listener_wrapper.go b/xds/internal/server/listener_wrapper.go index 078b2112581d..9f5b2ecafe5f 100644 --- a/xds/internal/server/listener_wrapper.go +++ b/xds/internal/server/listener_wrapper.go @@ -21,27 +21,56 @@ package server import ( + "errors" "fmt" "net" "sync" + "sync/atomic" + "time" + "unsafe" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -var logger = grpclog.Component("xds") +var ( + logger = grpclog.Component("xds") -func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p]", p)) -} + // Backoff strategy for temporary errors received from Accept(). If this + // needs to be configurable, we can inject it through ListenerWrapperParams. + bs = internalbackoff.Exponential{Config: backoff.Config{ + BaseDelay: 5 * time.Millisecond, + Multiplier: 2.0, + MaxDelay: 1 * time.Second, + }} + backoffFunc = bs.Backoff +) + +// ServingModeCallback is the callback that users can register to get notified +// about the server's serving mode changes. The callback is invoked with the +// address of the listener and its new mode. The err parameter is set to a +// non-nil error if the server has transitioned into not-serving mode. 
+type ServingModeCallback func(addr net.Addr, mode connectivity.ServingMode, err error) -// XDSClientInterface wraps the methods on the xdsClient which are required by +// DrainCallback is the callback that an xDS-enabled server registers to get +// notified about updates to the Listener configuration. The server is expected +// to gracefully shutdown existing connections, thereby forcing clients to +// reconnect and have the new configuration applied to the newly created +// connections. +type DrainCallback func(addr net.Addr) + +// XDSClient wraps the methods on the XDSClient which are required by // the listenerWrapper. -type XDSClientInterface interface { - WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() +type XDSClient interface { + WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() + WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() BootstrapConfig() *bootstrap.Config } @@ -54,34 +83,54 @@ type ListenerWrapperParams struct { // XDSCredsInUse specifies whether or not the user expressed interest to // receive security configuration from the control plane. XDSCredsInUse bool - // XDSClient provides the functionality from the xdsClient required here. - XDSClient XDSClientInterface + // XDSClient provides the functionality from the XDSClient required here. + XDSClient XDSClient + // ModeCallback is the callback to invoke when the serving mode changes. + ModeCallback ServingModeCallback + // DrainCallback is the callback to invoke when the Listener gets a LDS + // update. + DrainCallback DrainCallback } // NewListenerWrapper creates a new listenerWrapper with params. It returns a // net.Listener and a channel which is written to, indicating that the former is // ready to be passed to grpc.Serve(). +// +// Only TCP listeners are supported. 
func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan struct{}) { lw := &listenerWrapper{ - Listener: params.Listener, - name: params.ListenerResourceName, - xdsCredsInUse: params.XDSCredsInUse, - xdsC: params.XDSClient, + Listener: params.Listener, + name: params.ListenerResourceName, + xdsCredsInUse: params.XDSCredsInUse, + xdsC: params.XDSClient, + modeCallback: params.ModeCallback, + drainCallback: params.DrainCallback, + isUnspecifiedAddr: params.Listener.Addr().(*net.TCPAddr).IP.IsUnspecified(), - closed: grpcsync.NewEvent(), - goodUpdate: grpcsync.NewEvent(), + mode: connectivity.ServingModeStarting, + closed: grpcsync.NewEvent(), + goodUpdate: grpcsync.NewEvent(), + ldsUpdateCh: make(chan ldsUpdateWithError, 1), + rdsUpdateCh: make(chan rdsHandlerUpdate, 1), } - lw.logger = prefixLogger(lw) + lw.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[xds-server-listener %p] ", lw)) - cancelWatch := lw.xdsC.WatchListener(lw.name, lw.handleListenerUpdate) - lw.logger.Infof("Watch started on resource name %v", lw.name) - lw.cancelWatch = func() { - cancelWatch() - lw.logger.Infof("Watch cancelled on resource name %v", lw.name) - } + // Serve() verifies that Addr() returns a valid TCPAddr. So, it is safe to + // ignore the error from SplitHostPort(). + lisAddr := lw.Listener.Addr().String() + lw.addr, lw.port, _ = net.SplitHostPort(lisAddr) + + lw.rdsHandler = newRDSHandler(lw.xdsC, lw.rdsUpdateCh) + lw.cancelWatch = lw.xdsC.WatchListener(lw.name, lw.handleListenerUpdate) + go lw.run() return lw, lw.goodUpdate.Done() } +type ldsUpdateWithError struct { + update xdsresource.ListenerUpdate + err error +} + // listenerWrapper wraps the net.Listener associated with the listening address // passed to Serve(). It also contains all other state associated with this // particular invocation of Serve(). 
@@ -89,19 +138,26 @@ type listenerWrapper struct { net.Listener logger *internalgrpclog.PrefixLogger - // TODO: Maintain serving state of this listener. - name string xdsCredsInUse bool - xdsC XDSClientInterface + xdsC XDSClient cancelWatch func() + modeCallback ServingModeCallback + drainCallback DrainCallback + + // Set to true if the listener is bound to the IP_ANY address (which is + // "0.0.0.0" for IPv4 and "::" for IPv6). + isUnspecifiedAddr bool + // Listening address and port. Used to validate the socket address in the + // Listener resource received from the control plane. + addr, port string // This is used to notify that a good update has been received and that // Serve() can be invoked on the underlying gRPC server. Using an event // instead of a vanilla channel simplifies the update handler as it need not // keep track of whether the received update is the first one or not. goodUpdate *grpcsync.Event - // A small race exists in the xdsClient code between the receipt of an xDS + // A small race exists in the XDSClient code between the receipt of an xDS // response and the user cancelling the associated watch. In this window, // the registered callback may be invoked after the watch is canceled, and // the user is expected to work around this. This event signifies that the @@ -109,24 +165,139 @@ type listenerWrapper struct { // updates received in the callback if this event has fired. closed *grpcsync.Event - // Filter chains received as part of the last good update. The reason for - // using an rw lock here is that this field will be read by all connections - // during their server-side handshake (in the hot path), but writes to this - // happen rarely (when we get a Listener resource update). - mu sync.RWMutex - filterChains []*xdsclient.FilterChain - defaultFilterChain *xdsclient.FilterChain + // mu guards access to the current serving mode and the filter chains. 
The + // reason for using an rw lock here is that these fields are read in + // Accept() for all incoming connections, but writes happen rarely (when we + // get a Listener resource update). + mu sync.RWMutex + // Current serving mode. + mode connectivity.ServingMode + // Filter chains received as part of the last good update. + filterChains *xdsresource.FilterChainManager + + // rdsHandler is used for any dynamic RDS resources specified in a LDS + // update. + rdsHandler *rdsHandler + // rdsUpdates are the RDS resources received from the management + // server, keyed on the RouteName of the RDS resource. + rdsUpdates unsafe.Pointer // map[string]xdsclient.RouteConfigUpdate + // ldsUpdateCh is a channel for XDSClient LDS updates. + ldsUpdateCh chan ldsUpdateWithError + // rdsUpdateCh is a channel for XDSClient RDS updates. + rdsUpdateCh chan rdsHandlerUpdate } // Accept blocks on an Accept() on the underlying listener, and wraps the // returned net.connWrapper with the configured certificate providers. func (l *listenerWrapper) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return nil, err + var retries int + for { + conn, err := l.Listener.Accept() + if err != nil { + // Temporary() method is implemented by certain error types returned + // from the net package, and it is useful for us to not shutdown the + // server in these conditions. The listen queue being full is one + // such case. + if ne, ok := err.(interface{ Temporary() bool }); !ok || !ne.Temporary() { + return nil, err + } + retries++ + timer := time.NewTimer(backoffFunc(retries)) + select { + case <-timer.C: + case <-l.closed.Done(): + timer.Stop() + // Continuing here will cause us to call Accept() again + // which will return a non-temporary error. + continue + } + continue + } + // Reset retries after a successful Accept(). 
+ retries = 0 + + // Since the net.Conn represents an incoming connection, the source and + // destination address can be retrieved from the local address and + // remote address of the net.Conn respectively. + destAddr, ok1 := conn.LocalAddr().(*net.TCPAddr) + srcAddr, ok2 := conn.RemoteAddr().(*net.TCPAddr) + if !ok1 || !ok2 { + // If the incoming connection is not a TCP connection, which is + // really unexpected since we check whether the provided listener is + // a TCP listener in Serve(), we return an error which would cause + // us to stop serving. + return nil, fmt.Errorf("received connection with non-TCP address (local: %T, remote %T)", conn.LocalAddr(), conn.RemoteAddr()) + } + + l.mu.RLock() + if l.mode == connectivity.ServingModeNotServing { + // Close connections as soon as we accept them when we are in + // "not-serving" mode. Since we accept a net.Listener from the user + // in Serve(), we cannot close the listener when we move to + // "not-serving". Closing the connection immediately upon accepting + // is one of the other ways to implement the "not-serving" mode as + // outlined in gRFC A36. + l.mu.RUnlock() + conn.Close() + continue + } + fc, err := l.filterChains.Lookup(xdsresource.FilterChainLookupParams{ + IsUnspecifiedListener: l.isUnspecifiedAddr, + DestAddr: destAddr.IP, + SourceAddr: srcAddr.IP, + SourcePort: srcAddr.Port, + }) + l.mu.RUnlock() + if err != nil { + // When a matching filter chain is not found, we close the + // connection right away, but do not return an error back to + // `grpc.Serve()` from where this Accept() was invoked. Returning an + // error to `grpc.Serve()` causes the server to shutdown. If we want + // to avoid the server from shutting down, we would need to return + // an error type which implements the `Temporary() bool` method, + // which is invoked by `grpc.Serve()` to see if the returned error + // represents a temporary condition. 
In the case of a temporary + error, `grpc.Serve()` method sleeps for a small duration and + therefore ends up blocking all connection attempts during that + time frame, which is also not ideal for an error like this. + l.logger.Warningf("Connection from %s to %s failed to find any matching filter chain", conn.RemoteAddr().String(), conn.LocalAddr().String()) + conn.Close() + continue + } + if !envconfig.XDSRBAC { + return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil + } + var rc xdsresource.RouteConfigUpdate + if fc.InlineRouteConfig != nil { + rc = *fc.InlineRouteConfig + } else { + rcPtr := atomic.LoadPointer(&l.rdsUpdates) + rcuPtr := (*map[string]xdsresource.RouteConfigUpdate)(rcPtr) + // This shouldn't happen, but this error protects against a panic. + if rcuPtr == nil { + return nil, errors.New("route configuration pointer is nil") + } + rcu := *rcuPtr + rc = rcu[fc.RouteConfigName] + } + // The filter chain will construct a usable route table on each + // connection accept. This is done because preinstantiating every route + // table before it is needed for a connection would potentially lead to + // a lot of cpu time and memory allocated for route tables that will + // never be used. There was also a thought to cache this configuration, + // and reuse it for the next accepted connection. However, this would + // lead to a lot of code complexity (RDS Updates for a given route name + // can come in at any time), and connections aren't accepted too often, + // so this reinstantiation of the Route Configuration is an acceptable + // tradeoff for simplicity. + vhswi, err := fc.ConstructUsableRouteConfiguration(rc) + if err != nil { + l.logger.Warningf("Failed to construct usable route configuration: %v", err) + conn.Close() + continue + } + return &connWrapper{Conn: conn, filterChain: fc, parent: l, virtualHosts: vhswi}, nil } - // TODO: Close connections if in "non-serving" state. 
- return &connWrapper{Conn: c, parent: l}, nil } // Close closes the underlying listener. It also cancels the xDS watch @@ -138,63 +309,134 @@ func (l *listenerWrapper) Close() error { if l.cancelWatch != nil { l.cancelWatch() } + l.rdsHandler.close() return nil } -func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, err error) { +// run is a long running goroutine which handles all xds updates. LDS and RDS +// push updates onto a channel which is read and acted upon from this goroutine. +func (l *listenerWrapper) run() { + for { + select { + case <-l.closed.Done(): + return + case u := <-l.ldsUpdateCh: + l.handleLDSUpdate(u) + case u := <-l.rdsUpdateCh: + l.handleRDSUpdate(u) + } + } +} + +// handleLDSUpdate is the callback which handles LDS Updates. It writes the +// received update to the update channel, which is picked up by the run +// goroutine. +func (l *listenerWrapper) handleListenerUpdate(update xdsresource.ListenerUpdate, err error) { if l.closed.HasFired() { l.logger.Warningf("Resource %q received update: %v with error: %v, after listener was closed", l.name, update, err) return } + // Remove any existing entry in ldsUpdateCh and replace with the new one, as the only update + // listener cares about is most recent update. + select { + case <-l.ldsUpdateCh: + default: + } + l.ldsUpdateCh <- ldsUpdateWithError{update: update, err: err} +} + +// handleRDSUpdate handles a full rds update from rds handler. On a successful +// update, the server will switch to ServingModeServing as the full +// configuration (both LDS and RDS) has been received. 
+func (l *listenerWrapper) handleRDSUpdate(update rdsHandlerUpdate) { + if l.closed.HasFired() { + l.logger.Warningf("RDS received update: %v with error: %v, after listener was closed", update.updates, update.err) + return + } + if update.err != nil { + l.logger.Warningf("Received error for rds names specified in resource %q: %+v", l.name, update.err) + if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { + l.switchMode(nil, connectivity.ServingModeNotServing, update.err) + } + // For errors which are anything other than "resource-not-found", we + // continue to use the old configuration. + return + } + atomic.StorePointer(&l.rdsUpdates, unsafe.Pointer(&update.updates)) + + l.switchMode(l.filterChains, connectivity.ServingModeServing, nil) + l.goodUpdate.Fire() +} - // TODO: Handle resource-not-found errors by moving to not-serving state. - if err != nil { - // We simply log an error here and hope we get a successful update - // in the future. The error could be because of a timeout or an - // actual error, like the requested resource not found. In any case, - // it is fine for the server to hang indefinitely until Stop() is - // called. - l.logger.Warningf("Received error for resource %q: %+v", l.name, err) +func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) { + if update.err != nil { + l.logger.Warningf("Received error for resource %q: %+v", l.name, update.err) + if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound { + l.switchMode(nil, connectivity.ServingModeNotServing, update.err) + } + // For errors which are anything other than "resource-not-found", we + // continue to use the old configuration. return } - l.logger.Infof("Received update for resource %q: %+v", l.name, update) // Make sure that the socket address on the received Listener resource // matches the address of the net.Listener passed to us by the user. 
This - // check is done here instead of at the xdsClient layer because of the + // check is done here instead of at the XDSClient layer because of the // following couple of reasons: - // - xdsClient cannot know the listening address of every listener in the + // - XDSClient cannot know the listening address of every listener in the // system, and hence cannot perform this check. // - this is a very context-dependent check and only the server has the // appropriate context to perform this check. // - // What this means is that the xdsClient has ACKed a resource which is going - // to push the server into a "not serving" state. This is not ideal, but - // this is what we have decided to do. See gRPC A36 for more details. - // TODO(easwars): Switch to "not serving" if the host:port does not match. - lisAddr := l.Listener.Addr().String() - addr, port, err := net.SplitHostPort(lisAddr) - if err != nil { - // This is never expected to return a non-nil error since we have made - // sure that the listener is a TCP listener at the beginning of Serve(). - // This is simply paranoia. - l.logger.Warningf("Local listener address %q failed to parse as IP:port: %v", lisAddr, err) + // What this means is that the XDSClient has ACKed a resource which can push + // the server into a "not serving" mode. This is not ideal, but this is + // what we have decided to do. See gRPC A36 for more details. 
+ ilc := update.update.InboundListenerCfg + if ilc.Address != l.addr || ilc.Port != l.port { + l.switchMode(nil, connectivity.ServingModeNotServing, fmt.Errorf("address (%s:%s) in Listener update does not match listening address: (%s:%s)", ilc.Address, ilc.Port, l.addr, l.port)) return } - ilc := update.InboundListenerCfg - if ilc == nil { - l.logger.Warningf("Missing host:port in Listener updates") - return + + // "Updates to a Listener cause all older connections on that Listener to be + // gracefully shut down with a grace period of 10 minutes for long-lived + // RPC's, such that clients will reconnect and have the updated + // configuration apply." - A36 Note that this is not the same as moving the + // Server's state to ServingModeNotServing. That prevents new connections + // from being accepted, whereas here we simply want the clients to reconnect + // to get the updated configuration. + if envconfig.XDSRBAC { + if l.drainCallback != nil { + l.drainCallback(l.Listener.Addr()) + } } - if ilc.Address != addr || ilc.Port != port { - l.logger.Warningf("Received host:port (%s:%d) in Listener update does not match local listening address: %s", ilc.Address, ilc.Port, lisAddr) - return + l.rdsHandler.updateRouteNamesToWatch(ilc.FilterChains.RouteConfigNames) + // If there are no dynamic RDS Configurations still needed to be received + // from the management server, this listener has all the configuration + // needed, and is ready to serve. + if len(ilc.FilterChains.RouteConfigNames) == 0 { + l.switchMode(ilc.FilterChains, connectivity.ServingModeServing, nil) + l.goodUpdate.Fire() } +} +// switchMode updates the value of serving mode and filter chains stored in the +// listenerWrapper. And if the serving mode has changed, it invokes the +// registered mode change callback. 
+func (l *listenerWrapper) switchMode(fcs *xdsresource.FilterChainManager, newMode connectivity.ServingMode, err error) { l.mu.Lock() - l.filterChains = ilc.FilterChains - l.defaultFilterChain = ilc.DefaultFilterChain - l.mu.Unlock() - l.goodUpdate.Fire() - // TODO: Move to serving state on receipt of a good response. + defer l.mu.Unlock() + + l.filterChains = fcs + if l.mode == newMode && l.mode == connectivity.ServingModeServing { + // Redundant updates are suppressed only when we are SERVING and the new + // mode is also SERVING. In the other case (where we are NOT_SERVING and the + // new mode is also NOT_SERVING), the update is not suppressed as: + // 1. the error may have changed + // 2. it provides a timestamp of the last backoff attempt + return + } + l.mode = newMode + if l.modeCallback != nil { + l.modeCallback(l.Listener.Addr(), newMode, err) + } } diff --git a/xds/internal/server/listener_wrapper_test.go b/xds/internal/server/listener_wrapper_test.go new file mode 100644 index 000000000000..7d246f6373eb --- /dev/null +++ b/xds/internal/server/listener_wrapper_test.go @@ -0,0 +1,486 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package server + +import ( + "context" + "errors" + "net" + "strconv" + "testing" + "time" + + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + + _ "google.golang.org/grpc/xds/internal/httpfilter/router" +) + +const ( + fakeListenerHost = "0.0.0.0" + fakeListenerPort = 50051 + testListenerResourceName = "lds.target.1.2.3.4:1111" + defaultTestTimeout = 1 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + +var listenerWithRouteConfiguration = &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourcePorts: []uint32{80}, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: 
testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-1", + }, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + }), + }, + }, + }, + }, + }, +} + +var listenerWithFilterChains = &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourcePorts: []uint32{80}, + }, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + 
}}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + }), + }, + }, + }, + }, + }, +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +type tempError struct{} + +func (tempError) Error() string { + return "listenerWrapper test temporary error" +} + +func (tempError) Temporary() bool { + return true +} + +// connAndErr wraps a net.Conn and an error. +type connAndErr struct { + conn net.Conn + err error +} + +// fakeListener allows the user to inject conns returned by Accept(). +type fakeListener struct { + acceptCh chan connAndErr + closeCh *testutils.Channel +} + +func (fl *fakeListener) Accept() (net.Conn, error) { + cne, ok := <-fl.acceptCh + if !ok { + return nil, errors.New("a non-temporary error") + } + return cne.conn, cne.err +} + +func (fl *fakeListener) Close() error { + fl.closeCh.Send(nil) + return nil +} + +func (fl *fakeListener) Addr() net.Addr { + return &net.TCPAddr{ + IP: net.IPv4(0, 0, 0, 0), + Port: fakeListenerPort, + } +} + +// fakeConn overrides LocalAddr, RemoteAddr and Close methods. +type fakeConn struct { + net.Conn + local, remote net.Addr + closeCh *testutils.Channel +} + +func (fc *fakeConn) LocalAddr() net.Addr { + return fc.local +} + +func (fc *fakeConn) RemoteAddr() net.Addr { + return fc.remote +} + +func (fc *fakeConn) Close() error { + fc.closeCh.Send(nil) + return nil +} + +func newListenerWrapper(t *testing.T) (*listenerWrapper, <-chan struct{}, *fakeclient.Client, *fakeListener, func()) { + t.Helper() + + // Create a listener wrapper with a fake listener and fake XDSClient and + // verify that it extracts the host and port from the passed in listener. 
+ lis := &fakeListener{ + acceptCh: make(chan connAndErr, 1), + closeCh: testutils.NewChannel(), + } + xdsC := fakeclient.NewClient() + lParams := ListenerWrapperParams{ + Listener: lis, + ListenerResourceName: testListenerResourceName, + XDSClient: xdsC, + } + l, readyCh := NewListenerWrapper(lParams) + if l == nil { + t.Fatalf("NewListenerWrapper(%+v) returned nil", lParams) + } + lw, ok := l.(*listenerWrapper) + if !ok { + t.Fatalf("NewListenerWrapper(%+v) returned listener of type %T want *listenerWrapper", lParams, l) + } + if lw.addr != fakeListenerHost || lw.port != strconv.Itoa(fakeListenerPort) { + t.Fatalf("listenerWrapper has host:port %s:%s, want %s:%d", lw.addr, lw.port, fakeListenerHost, fakeListenerPort) + } + return lw, readyCh, xdsC, lis, func() { l.Close() } +} + +func (s) TestNewListenerWrapper(t *testing.T) { + _, readyCh, xdsC, _, cleanup := newListenerWrapper(t) + defer cleanup() + + // Verify that the listener wrapper registers a listener watch for the + // expected Listener resource name. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + name, err := xdsC.WaitForWatchListener(ctx) + if err != nil { + t.Fatalf("error when waiting for a watch on a Listener resource: %v", err) + } + if name != testListenerResourceName { + t.Fatalf("listenerWrapper registered a lds watch on %s, want %s", name, testListenerResourceName) + } + + // Push an error to the listener update handler. 
+ xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, errors.New("bad listener update")) + timer := time.NewTimer(defaultTestShortTimeout) + select { + case <-timer.C: + timer.Stop() + case <-readyCh: + t.Fatalf("ready channel written to after receipt of a bad Listener update") + } + + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains) + if err != nil { + t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) + } + + // Push an update whose address does not match the address to which our + // listener is bound, and verify that the ready channel is not written to. + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ + Address: "10.0.0.1", + Port: "50051", + FilterChains: fcm, + }}, nil) + timer = time.NewTimer(defaultTestShortTimeout) + select { + case <-timer.C: + timer.Stop() + case <-readyCh: + t.Fatalf("ready channel written to after receipt of a bad Listener update") + } + + // Push a good update, and verify that the ready channel is written to. + // Since there are no dynamic RDS updates needed to be received, the + // ListenerWrapper does not have to wait for anything else before telling + // that it is ready. + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ + Address: fakeListenerHost, + Port: strconv.Itoa(fakeListenerPort), + FilterChains: fcm, + }}, nil) + + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for the ready channel to be written to after receipt of a good Listener update") + case <-readyCh: + } +} + +// TestNewListenerWrapperWithRouteUpdate tests the scenario where the listener +// gets built, starts a watch, that watch returns a list of Route Names to +// return, then receives an update from the rds handler. Only after receiving +// the update from the rds handler should it move the server to +// ServingModeServing. 
+func (s) TestNewListenerWrapperWithRouteUpdate(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + _, readyCh, xdsC, _, cleanup := newListenerWrapper(t) + defer cleanup() + + // Verify that the listener wrapper registers a listener watch for the + // expected Listener resource name. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + name, err := xdsC.WaitForWatchListener(ctx) + if err != nil { + t.Fatalf("error when waiting for a watch on a Listener resource: %v", err) + } + if name != testListenerResourceName { + t.Fatalf("listenerWrapper registered a lds watch on %s, want %s", name, testListenerResourceName) + } + fcm, err := xdsresource.NewFilterChainManager(listenerWithRouteConfiguration) + if err != nil { + t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) + } + + // Push a good update which contains a Filter Chain that specifies dynamic + // RDS Resources that need to be received. This should ping rds handler + // about which rds names to start, which will eventually start a watch on + // xds client for rds name "route-1". + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ + Address: fakeListenerHost, + Port: strconv.Itoa(fakeListenerPort), + FilterChains: fcm, + }}, nil) + + // This should start a watch on xds client for rds name "route-1". + routeName, err := xdsC.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("error when waiting for a watch on a Route resource: %v", err) + } + if routeName != "route-1" { + t.Fatalf("listenerWrapper registered a lds watch on %s, want %s", routeName, "route-1") + } + + // This shouldn't invoke good update channel, as has not received rds updates yet. 
+ timer := time.NewTimer(defaultTestShortTimeout) + select { + case <-timer.C: + timer.Stop() + case <-readyCh: + t.Fatalf("ready channel written to without rds configuration specified") + } + + // Invoke rds callback for the started rds watch. This valid rds callback + // should trigger the listener wrapper to fire GoodUpdate, as it has + // received both it's LDS Configuration and also RDS Configuration, + // specified in LDS Configuration. + xdsC.InvokeWatchRouteConfigCallback("route-1", xdsresource.RouteConfigUpdate{}, nil) + + // All of the xDS updates have completed, so can expect to send a ping on + // good update channel. + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for the ready channel to be written to after receipt of a good rds update") + case <-readyCh: + } +} + +func (s) TestListenerWrapper_Accept(t *testing.T) { + boCh := testutils.NewChannel() + origBackoffFunc := backoffFunc + backoffFunc = func(v int) time.Duration { + boCh.Send(v) + return 0 + } + defer func() { backoffFunc = origBackoffFunc }() + + lw, readyCh, xdsC, lis, cleanup := newListenerWrapper(t) + defer cleanup() + + // Push a good update with a filter chain which accepts local connections on + // 192.168.0.0/16 subnet and port 80. + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains) + if err != nil { + t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) + } + xdsC.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + InboundListenerCfg: &xdsresource.InboundListenerConfig{ + Address: fakeListenerHost, + Port: strconv.Itoa(fakeListenerPort), + FilterChains: fcm, + }}, nil) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + defer close(lis.acceptCh) + select { + case <-ctx.Done(): + t.Fatalf("timeout waiting for the ready channel to be written to after receipt of a good Listener update") + case <-readyCh: + } + + // Push a non-temporary error into Accept(). 
+	nonTempErr := errors.New("a non-temporary error")
+	lis.acceptCh <- connAndErr{err: nonTempErr}
+	if _, err := lw.Accept(); err != nonTempErr {
+		t.Fatalf("listenerWrapper.Accept() returned error: %v, want: %v", err, nonTempErr)
+	}
+
+	// Invoke Accept() in a goroutine since we expect it to swallow:
+	// 1. temporary errors returned from the underlying listener
+	// 2. errors related to finding a matching filter chain for the incoming
+	// connection.
+	errCh := testutils.NewChannel()
+	go func() {
+		conn, err := lw.Accept()
+		if err != nil {
+			errCh.Send(err)
+			return
+		}
+		if _, ok := conn.(*connWrapper); !ok {
+			errCh.Send(errors.New("listenerWrapper.Accept() returned a Conn which is not of type *connWrapper"))
+			return
+		}
+		errCh.Send(nil)
+	}()
+
+	// Push a temporary error into Accept() and verify that it backs off.
+	lis.acceptCh <- connAndErr{err: tempError{}}
+	if _, err := boCh.Receive(ctx); err != nil {
+		t.Fatalf("error when waiting for Accept() to backoff on temporary errors: %v", err)
+	}
+
+	// Push a fakeConn which does not match any filter chains configured on the
+	// received Listener resource. Verify that the conn is closed.
+	fc := &fakeConn{
+		local:   &net.TCPAddr{IP: net.IPv4(192, 168, 1, 2), Port: 79},
+		remote:  &net.TCPAddr{IP: net.IPv4(10, 1, 1, 1), Port: 80},
+		closeCh: testutils.NewChannel(),
+	}
+	lis.acceptCh <- connAndErr{conn: fc}
+	if _, err := fc.closeCh.Receive(ctx); err != nil {
+		t.Fatalf("error when waiting for conn to be closed on no filter chain match: %v", err)
+	}
+
+	// Push a fakeConn which matches the filter chains configured on the
+	// received Listener resource. Verify that Accept() returns.
+ fc = &fakeConn{ + local: &net.TCPAddr{IP: net.IPv4(192, 168, 1, 2)}, + remote: &net.TCPAddr{IP: net.IPv4(192, 168, 1, 2), Port: 80}, + closeCh: testutils.NewChannel(), + } + lis.acceptCh <- connAndErr{conn: fc} + if _, err := errCh.Receive(ctx); err != nil { + t.Fatalf("error when waiting for Accept() to return the conn on filter chain match: %v", err) + } +} diff --git a/xds/internal/server/rds_handler.go b/xds/internal/server/rds_handler.go new file mode 100644 index 000000000000..722748cbd526 --- /dev/null +++ b/xds/internal/server/rds_handler.go @@ -0,0 +1,133 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package server + +import ( + "sync" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// rdsHandlerUpdate wraps the full RouteConfigUpdate that are dynamically +// queried for a given server side listener. +type rdsHandlerUpdate struct { + updates map[string]xdsresource.RouteConfigUpdate + err error +} + +// rdsHandler handles any RDS queries that need to be started for a given server +// side listeners Filter Chains (i.e. not inline). +type rdsHandler struct { + xdsC XDSClient + + mu sync.Mutex + updates map[string]xdsresource.RouteConfigUpdate + cancels map[string]func() + + // For a rdsHandler update, the only update wrapped listener cares about is + // most recent one, so this channel will be opportunistically drained before + // sending any new updates. 
+ updateChannel chan rdsHandlerUpdate +} + +// newRDSHandler creates a new rdsHandler to watch for RDS resources. +// listenerWrapper updates the list of route names to watch by calling +// updateRouteNamesToWatch() upon receipt of new Listener configuration. +func newRDSHandler(xdsC XDSClient, ch chan rdsHandlerUpdate) *rdsHandler { + return &rdsHandler{ + xdsC: xdsC, + updateChannel: ch, + updates: make(map[string]xdsresource.RouteConfigUpdate), + cancels: make(map[string]func()), + } +} + +// updateRouteNamesToWatch handles a list of route names to watch for a given +// server side listener (if a filter chain specifies dynamic RDS configuration). +// This function handles all the logic with respect to any routes that may have +// been added or deleted as compared to what was previously present. +func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool) { + rh.mu.Lock() + defer rh.mu.Unlock() + // Add and start watches for any routes for any new routes in + // routeNamesToWatch. + for routeName := range routeNamesToWatch { + if _, ok := rh.cancels[routeName]; !ok { + func(routeName string) { + rh.cancels[routeName] = rh.xdsC.WatchRouteConfig(routeName, func(update xdsresource.RouteConfigUpdate, err error) { + rh.handleRouteUpdate(routeName, update, err) + }) + }(routeName) + } + } + + // Delete and cancel watches for any routes from persisted routeNamesToWatch + // that are no longer present. + for routeName := range rh.cancels { + if _, ok := routeNamesToWatch[routeName]; !ok { + rh.cancels[routeName]() + delete(rh.cancels, routeName) + delete(rh.updates, routeName) + } + } + + // If the full list (determined by length) of updates are now successfully + // updated, the listener is ready to be updated. 
+ if len(rh.updates) == len(rh.cancels) && len(routeNamesToWatch) != 0 { + drainAndPush(rh.updateChannel, rdsHandlerUpdate{updates: rh.updates}) + } +} + +// handleRouteUpdate persists the route config for a given route name, and also +// sends an update to the Listener Wrapper on an error received or if the rds +// handler has a full collection of updates. +func (rh *rdsHandler) handleRouteUpdate(routeName string, update xdsresource.RouteConfigUpdate, err error) { + if err != nil { + drainAndPush(rh.updateChannel, rdsHandlerUpdate{err: err}) + return + } + rh.mu.Lock() + defer rh.mu.Unlock() + rh.updates[routeName] = update + + // If the full list (determined by length) of updates have successfully + // updated, the listener is ready to be updated. + if len(rh.updates) == len(rh.cancels) { + drainAndPush(rh.updateChannel, rdsHandlerUpdate{updates: rh.updates}) + } +} + +func drainAndPush(ch chan rdsHandlerUpdate, update rdsHandlerUpdate) { + select { + case <-ch: + default: + } + ch <- update +} + +// close() is meant to be called by wrapped listener when the wrapped listener +// is closed, and it cleans up resources by canceling all the active RDS +// watches. +func (rh *rdsHandler) close() { + rh.mu.Lock() + defer rh.mu.Unlock() + for _, cancel := range rh.cancels { + cancel() + } +} diff --git a/xds/internal/server/rds_handler_test.go b/xds/internal/server/rds_handler_test.go new file mode 100644 index 000000000000..fc622851cfa2 --- /dev/null +++ b/xds/internal/server/rds_handler_test.go @@ -0,0 +1,401 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package server + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +const ( + route1 = "route1" + route2 = "route2" + route3 = "route3" +) + +// setupTests creates a rds handler with a fake xds client for control over the +// xds client. +func setupTests() (*rdsHandler, *fakeclient.Client, chan rdsHandlerUpdate) { + xdsC := fakeclient.NewClient() + ch := make(chan rdsHandlerUpdate, 1) + rh := newRDSHandler(xdsC, ch) + return rh, xdsC, ch +} + +// waitForFuncWithNames makes sure that a blocking function returns the correct +// set of names, where order doesn't matter. This takes away nondeterminism from +// ranging through a map. 
+func waitForFuncWithNames(ctx context.Context, f func(context.Context) (string, error), names ...string) error { + wantNames := make(map[string]bool, len(names)) + for _, name := range names { + wantNames[name] = true + } + gotNames := make(map[string]bool, len(names)) + for range wantNames { + name, err := f(ctx) + if err != nil { + return err + } + gotNames[name] = true + } + if !cmp.Equal(gotNames, wantNames) { + return fmt.Errorf("got routeNames %v, want %v", gotNames, wantNames) + } + return nil +} + +// TestSuccessCaseOneRDSWatch tests the simplest scenario: the rds handler +// receives a single route name, starts a watch for that route name, gets a +// successful update, and then writes an update to the update channel for +// listener to pick up. +func (s) TestSuccessCaseOneRDSWatch(t *testing.T) { + rh, fakeClient, ch := setupTests() + // When you first update the rds handler with a list of a single Route names + // that needs dynamic RDS Configuration, this Route name has not been seen + // before, so the RDS Handler should start a watch on that RouteName. + rh.updateRouteNamesToWatch(map[string]bool{route1: true}) + // The RDS Handler should start a watch for that routeName. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotRoute, err := fakeClient.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchRDS failed with error: %v", err) + } + if gotRoute != route1 { + t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route1) + } + rdsUpdate := xdsresource.RouteConfigUpdate{} + // Invoke callback with the xds client with a certain route update. Due to + // this route update updating every route name that rds handler handles, + // this should write to the update channel to send to the listener. 
+ fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil) + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate} + select { + case rhu := <-ch: + if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { + t.Fatalf("got unexpected route update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + // Close the rds handler. This is meant to be called when the lis wrapper is + // closed, and the call should cancel all the watches present (for this + // test, a single watch). + rh.close() + routeNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelRDS failed with error: %v", err) + } + if routeNameDeleted != route1 { + t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route1) + } +} + +// TestSuccessCaseTwoUpdates tests the case where the rds handler receives an +// update with a single Route, then receives a second update with two routes. +// The handler should start a watch for the added route, and if received a RDS +// update for that route it should send an update with both RDS updates present. +func (s) TestSuccessCaseTwoUpdates(t *testing.T) { + rh, fakeClient, ch := setupTests() + + rh.updateRouteNamesToWatch(map[string]bool{route1: true}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotRoute, err := fakeClient.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchRDS failed with error: %v", err) + } + if gotRoute != route1 { + t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route1) + } + + // Update the RDSHandler with route names which adds a route name to watch. + // This should trigger the RDSHandler to start a watch for the added route + // name to watch. 
+ rh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true}) + gotRoute, err = fakeClient.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchRDS failed with error: %v", err) + } + if gotRoute != route2 { + t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route2) + } + + // Invoke the callback with an update for route 1. This shouldn't cause the + // handler to write an update, as it has not received RouteConfigurations + // for every RouteName. + rdsUpdate1 := xdsresource.RouteConfigUpdate{} + fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate1, nil) + + // The RDS Handler should not send an update. + sCtx, sCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCtxCancel() + select { + case <-ch: + t.Fatal("RDS Handler wrote an update to updateChannel when it shouldn't have, as each route name has not received an update yet") + case <-sCtx.Done(): + } + + // Invoke the callback with an update for route 2. This should cause the + // handler to write an update, as it has received RouteConfigurations for + // every RouteName. + rdsUpdate2 := xdsresource.RouteConfigUpdate{} + fakeClient.InvokeWatchRouteConfigCallback(route2, rdsUpdate2, nil) + // The RDS Handler should then update the listener wrapper with an update + // with two route configurations, as both route names the RDS Handler handles + // have received an update. + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate1, route2: rdsUpdate2} + select { + case rhu := <-ch: + if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { + t.Fatalf("got unexpected route update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the rds handler update to be written to the update buffer.") + } + + // Close the rds handler. 
This is meant to be called when the lis wrapper is + // closed, and the call should cancel all the watches present (for this + // test, two watches on route1 and route2). + rh.close() + if err = waitForFuncWithNames(ctx, fakeClient.WaitForCancelRouteConfigWatch, route1, route2); err != nil { + t.Fatalf("Error while waiting for names: %v", err) + } +} + +// TestSuccessCaseDeletedRoute tests the case where the rds handler receives an +// update with two routes, then receives an update with only one route. The RDS +// Handler is expected to cancel the watch for the route no longer present. +func (s) TestSuccessCaseDeletedRoute(t *testing.T) { + rh, fakeClient, ch := setupTests() + + rh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Will start two watches. + if err := waitForFuncWithNames(ctx, fakeClient.WaitForWatchRouteConfig, route1, route2); err != nil { + t.Fatalf("Error while waiting for names: %v", err) + } + + // Update the RDSHandler with route names which deletes a route name to + // watch. This should trigger the RDSHandler to cancel the watch for the + // deleted route name to watch. + rh.updateRouteNamesToWatch(map[string]bool{route1: true}) + // This should delete the watch for route2. + routeNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelRDS failed with error %v", err) + } + if routeNameDeleted != route2 { + t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route2) + } + + rdsUpdate := xdsresource.RouteConfigUpdate{} + // Invoke callback with the xds client with a certain route update. Due to + // this route update updating every route name that rds handler handles, + // this should write to the update channel to send to the listener. 
+ fakeClient.InvokeWatchRouteConfigCallback(route1, rdsUpdate, nil) + rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: rdsUpdate} + select { + case rhu := <-ch: + if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { + t.Fatalf("got unexpected route update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel.") + } + + rh.close() + routeNameDeleted, err = fakeClient.WaitForCancelRouteConfigWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelRDS failed with error: %v", err) + } + if routeNameDeleted != route1 { + t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route1) + } +} + +// TestSuccessCaseTwoUpdatesAddAndDeleteRoute tests the case where the rds +// handler receives an update with two routes, and then receives an update with +// two routes, one previously there and one added (i.e. 12 -> 23). This should +// cause the route that is no longer there to be deleted and cancelled, and the +// route that was added should have a watch started for it. +func (s) TestSuccessCaseTwoUpdatesAddAndDeleteRoute(t *testing.T) { + rh, fakeClient, ch := setupTests() + + rh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := waitForFuncWithNames(ctx, fakeClient.WaitForWatchRouteConfig, route1, route2); err != nil { + t.Fatalf("Error while waiting for names: %v", err) + } + + // Update the rds handler with two routes, one which was already there and a new route. + // This should cause the rds handler to delete/cancel watch for route 1 and start a watch + // for route 3. + rh.updateRouteNamesToWatch(map[string]bool{route2: true, route3: true}) + + // Start watch comes first, which should be for route3 as was just added. 
+ gotRoute, err := fakeClient.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchRDS failed with error: %v", err) + } + if gotRoute != route3 { + t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route3) + } + + // Then route 1 should be deleted/cancelled watch for, as it is no longer present + // in the new RouteName to watch map. + routeNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx) + if err != nil { + t.Fatalf("xdsClient.CancelRDS failed with error: %v", err) + } + if routeNameDeleted != route1 { + t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route1) + } + + // Invoke the callback with an update for route 2. This shouldn't cause the + // handler to write an update, as it has not received RouteConfigurations + // for every RouteName. + rdsUpdate2 := xdsresource.RouteConfigUpdate{} + fakeClient.InvokeWatchRouteConfigCallback(route2, rdsUpdate2, nil) + + // The RDS Handler should not send an update. + sCtx, sCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCtxCancel() + select { + case <-ch: + t.Fatalf("RDS Handler wrote an update to updateChannel when it shouldn't have, as each route name has not received an update yet") + case <-sCtx.Done(): + } + + // Invoke the callback with an update for route 3. This should cause the + // handler to write an update, as it has received RouteConfigurations for + // every RouteName. + rdsUpdate3 := xdsresource.RouteConfigUpdate{} + fakeClient.InvokeWatchRouteConfigCallback(route3, rdsUpdate3, nil) + // The RDS Handler should then update the listener wrapper with an update + // with two route configurations, as both route names the RDS Handler handles + // have received an update. 
+ rhuWant := map[string]xdsresource.RouteConfigUpdate{route2: rdsUpdate2, route3: rdsUpdate3} + select { + case rhu := <-rh.updateChannel: + if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" { + t.Fatalf("got unexpected route update, diff (-got, +want): %v", diff) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for the rds handler update to be written to the update buffer.") + } + // Close the rds handler. This is meant to be called when the lis wrapper is + // closed, and the call should cancel all the watches present (for this + // test, two watches on route2 and route3). + rh.close() + if err = waitForFuncWithNames(ctx, fakeClient.WaitForCancelRouteConfigWatch, route2, route3); err != nil { + t.Fatalf("Error while waiting for names: %v", err) + } +} + +// TestSuccessCaseSecondUpdateMakesRouteFull tests the scenario where the rds handler gets +// told to watch three rds configurations, gets two successful updates, then gets told to watch +// only those two. The rds handler should then write an update to update buffer. +func (s) TestSuccessCaseSecondUpdateMakesRouteFull(t *testing.T) { + rh, fakeClient, ch := setupTests() + + rh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true, route3: true}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := waitForFuncWithNames(ctx, fakeClient.WaitForWatchRouteConfig, route1, route2, route3); err != nil { + t.Fatalf("Error while waiting for names: %v", err) + } + + // Invoke the callbacks for two of the three watches. Since RDS is not full, + // this shouldn't trigger rds handler to write an update to update buffer. + fakeClient.InvokeWatchRouteConfigCallback(route1, xdsresource.RouteConfigUpdate{}, nil) + fakeClient.InvokeWatchRouteConfigCallback(route2, xdsresource.RouteConfigUpdate{}, nil) + + // The RDS Handler should not send an update. 
+	sCtx, sCtxCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout)
+	defer sCtxCancel()
+	select {
+	case <-rh.updateChannel:
+		t.Fatalf("RDS Handler wrote an update to updateChannel when it shouldn't have, as each route name has not received an update yet")
+	case <-sCtx.Done():
+	}
+
+	// Tell the rds handler to now only watch Route 1 and Route 2. This should
+	// trigger the rds handler to write an update to the update buffer as it now
+	// has full rds configuration.
+	rh.updateRouteNamesToWatch(map[string]bool{route1: true, route2: true})
+	// Route 3 should be deleted/cancelled watch for, as it is no longer present
+	// in the new RouteName to watch map.
+	routeNameDeleted, err := fakeClient.WaitForCancelRouteConfigWatch(ctx)
+	if err != nil {
+		t.Fatalf("xdsClient.CancelRDS failed with error: %v", err)
+	}
+	if routeNameDeleted != route3 {
+		t.Fatalf("xdsClient.CancelRDS called for route %v, want %v", routeNameDeleted, route3)
+	}
+	rhuWant := map[string]xdsresource.RouteConfigUpdate{route1: {}, route2: {}}
+	select {
+	case rhu := <-ch:
+		if diff := cmp.Diff(rhu.updates, rhuWant); diff != "" {
+			t.Fatalf("got unexpected route update, diff (-got, +want): %v", diff)
+		}
+	case <-ctx.Done():
+		t.Fatal("Timed out waiting for the rds handler update to be written to the update buffer.")
+	}
+}
+
+// TestErrorReceived tests the case where the rds handler receives a route name
+// to watch, then receives an update with an error. This error should be then
+// written to the update channel.
+func (s) TestErrorReceived(t *testing.T) { + rh, fakeClient, ch := setupTests() + + rh.updateRouteNamesToWatch(map[string]bool{route1: true}) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + gotRoute, err := fakeClient.WaitForWatchRouteConfig(ctx) + if err != nil { + t.Fatalf("xdsClient.WatchRDS failed with error %v", err) + } + if gotRoute != route1 { + t.Fatalf("xdsClient.WatchRDS called for route: %v, want %v", gotRoute, route1) + } + + rdsErr := errors.New("some error") + fakeClient.InvokeWatchRouteConfigCallback(route1, xdsresource.RouteConfigUpdate{}, rdsErr) + select { + case rhu := <-ch: + if rhu.err.Error() != "some error" { + t.Fatalf("Did not receive the expected error, instead received: %v", rhu.err.Error()) + } + case <-ctx.Done(): + t.Fatal("Timed out waiting for update from update channel") + } +} diff --git a/xds/internal/test/e2e/README.md b/xds/internal/test/e2e/README.md new file mode 100644 index 000000000000..33cffa0da56f --- /dev/null +++ b/xds/internal/test/e2e/README.md @@ -0,0 +1,19 @@ +Build client and server binaries. + +```sh +go build -o ./binaries/client ../../../../interop/xds/client/ +go build -o ./binaries/server ../../../../interop/xds/server/ +``` + +Run the test + +```sh +go test . -v +``` + +The client/server paths are flags + +```sh +go test . -v -client=$HOME/grpc-java/interop-testing/build/install/grpc-interop-testing/bin/xds-test-client +``` +Note that grpc logs are only turned on for Go. diff --git a/xds/internal/test/e2e/controlplane.go b/xds/internal/test/e2e/controlplane.go new file mode 100644 index 000000000000..98030dd448f5 --- /dev/null +++ b/xds/internal/test/e2e/controlplane.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2e + +import ( + "fmt" + + "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" +) + +type controlPlane struct { + server *e2e.ManagementServer + nodeID string + bootstrapContent string +} + +func newControlPlane() (*controlPlane, error) { + // Spin up an xDS management server on a local port. + server, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to spin up the xDS management server: %v", err) + } + + nodeID := uuid.New().String() + bootstrapContentBytes, err := bootstrap.Contents(bootstrap.Options{ + NodeID: nodeID, + ServerURI: server.Address, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, + }) + if err != nil { + server.Stop() + return nil, fmt.Errorf("failed to create bootstrap file: %v", err) + } + + return &controlPlane{ + server: server, + nodeID: nodeID, + bootstrapContent: string(bootstrapContentBytes), + }, nil +} + +func (cp *controlPlane) stop() { + cp.server.Stop() +} diff --git a/xds/internal/test/e2e/e2e.go b/xds/internal/test/e2e/e2e.go new file mode 100644 index 000000000000..30b125b787a1 --- /dev/null +++ b/xds/internal/test/e2e/e2e.go @@ -0,0 +1,173 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package e2e implements xds e2e tests using go-control-plane. +package e2e + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + + "google.golang.org/grpc" + channelzgrpc "google.golang.org/grpc/channelz/grpc_channelz_v1" + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" + "google.golang.org/grpc/credentials/insecure" + testgrpc "google.golang.org/grpc/interop/grpc_testing" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +func cmd(path string, logger io.Writer, args []string, env []string) *exec.Cmd { + cmd := exec.Command(path, args...) + cmd.Env = append(os.Environ(), env...) + cmd.Stdout = logger + cmd.Stderr = logger + return cmd +} + +const ( + clientStatsPort = 60363 // TODO: make this different per-test, only needed for parallel tests. +) + +type client struct { + cmd *exec.Cmd + + target string + statsCC *grpc.ClientConn +} + +// newClient create a client with the given target and bootstrap content. +func newClient(target, binaryPath, bootstrap string, logger io.Writer, flags ...string) (*client, error) { + cmd := cmd( + binaryPath, + logger, + append([]string{ + "--server=" + target, + "--print_response=true", + "--qps=100", + fmt.Sprintf("--stats_port=%d", clientStatsPort), + }, flags...), // Append any flags from caller. + []string{ + "GRPC_GO_LOG_VERBOSITY_LEVEL=99", + "GRPC_GO_LOG_SEVERITY_LEVEL=info", + "GRPC_XDS_BOOTSTRAP_CONFIG=" + bootstrap, // The bootstrap content doesn't need to be quoted. 
+		},
+	)
+	cmd.Start()
+
+	cc, err := grpc.Dial(fmt.Sprintf("localhost:%d", clientStatsPort), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.WaitForReady(true)))
+	if err != nil {
+		return nil, err
+	}
+	return &client{
+		cmd:     cmd,
+		target:  target,
+		statsCC: cc,
+	}, nil
+}
+
+func (c *client) clientStats(ctx context.Context) (*testpb.LoadBalancerStatsResponse, error) {
+	ccc := testgrpc.NewLoadBalancerStatsServiceClient(c.statsCC)
+	return ccc.GetClientStats(ctx, &testpb.LoadBalancerStatsRequest{
+		NumRpcs:    100,
+		TimeoutSec: 10,
+	})
+}
+
+func (c *client) configRPCs(ctx context.Context, req *testpb.ClientConfigureRequest) error {
+	ccc := testgrpc.NewXdsUpdateClientConfigureServiceClient(c.statsCC)
+	_, err := ccc.Configure(ctx, req)
+	return err
+}
+
+func (c *client) channelzSubChannels(ctx context.Context) ([]*channelzpb.Subchannel, error) {
+	ccc := channelzgrpc.NewChannelzClient(c.statsCC)
+	r, err := ccc.GetTopChannels(ctx, &channelzpb.GetTopChannelsRequest{})
+	if err != nil {
+		return nil, err
+	}
+
+	var ret []*channelzpb.Subchannel
+	for _, cc := range r.Channel {
+		if cc.Data.Target != c.target {
+			continue
+		}
+		for _, sc := range cc.SubchannelRef {
+			rr, err := ccc.GetSubchannel(ctx, &channelzpb.GetSubchannelRequest{SubchannelId: sc.SubchannelId})
+			if err != nil {
+				return nil, err
+			}
+			ret = append(ret, rr.Subchannel)
+		}
+	}
+	return ret, nil
+}
+
+func (c *client) stop() {
+	c.cmd.Process.Kill()
+	c.cmd.Wait()
+}
+
+const (
+	serverPort = 50051 // TODO: make this different per-test, only needed for parallel tests.
+)
+
+type server struct {
+	cmd  *exec.Cmd
+	port int
+}
+
+// newServers creates multiple servers with the given bootstrap content.
+//
+// Each server gets a different hostname, in the format of
+// <hostnamePrefix>-<index>.
+func newServers(hostnamePrefix, binaryPath, bootstrap string, logger io.Writer, count int) (_ []*server, err error) {
+	var ret []*server
+	defer func() {
+		if err != nil {
+			for _, s := range ret {
+				s.stop()
+			}
+		}
+	}()
+	for i := 0; i < count; i++ {
+		port := serverPort + i
+		cmd := cmd(
+			binaryPath,
+			logger,
+			[]string{
+				fmt.Sprintf("--port=%d", port),
+				fmt.Sprintf("--host_name_override=%s-%d", hostnamePrefix, i),
+			},
+			[]string{
+				"GRPC_GO_LOG_VERBOSITY_LEVEL=99",
+				"GRPC_GO_LOG_SEVERITY_LEVEL=info",
+				"GRPC_XDS_BOOTSTRAP_CONFIG=" + bootstrap, // The bootstrap content doesn't need to be quoted.
+			},
+		)
+		cmd.Start()
+		ret = append(ret, &server{cmd: cmd, port: port})
+	}
+	return ret, nil
+}
+
+func (s *server) stop() {
+	s.cmd.Process.Kill()
+	s.cmd.Wait()
+}
diff --git a/xds/internal/test/e2e/e2e_test.go b/xds/internal/test/e2e/e2e_test.go
new file mode 100644
index 000000000000..be8af2b0a26a
--- /dev/null
+++ b/xds/internal/test/e2e/e2e_test.go
@@ -0,0 +1,259 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package e2e + +import ( + "bytes" + "context" + "flag" + "fmt" + "os" + "strconv" + "testing" + "time" + + "google.golang.org/grpc/internal/testutils/xds/e2e" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" + testpb "google.golang.org/grpc/interop/grpc_testing" +) + +var ( + clientPath = flag.String("client", "./binaries/client", "The interop client") + serverPath = flag.String("server", "./binaries/server", "The interop server") +) + +type testOpts struct { + testName string + backendCount int + clientFlags []string +} + +func setup(t *testing.T, opts testOpts) (*controlPlane, *client, []*server) { + t.Helper() + if _, err := os.Stat(*clientPath); os.IsNotExist(err) { + t.Skip("skipped because client is not found") + } + if _, err := os.Stat(*serverPath); os.IsNotExist(err) { + t.Skip("skipped because server is not found") + } + backendCount := 1 + if opts.backendCount != 0 { + backendCount = opts.backendCount + } + + cp, err := newControlPlane() + if err != nil { + t.Fatalf("failed to start control-plane: %v", err) + } + t.Cleanup(cp.stop) + + var clientLog bytes.Buffer + c, err := newClient(fmt.Sprintf("xds:///%s", opts.testName), *clientPath, cp.bootstrapContent, &clientLog, opts.clientFlags...) + if err != nil { + t.Fatalf("failed to start client: %v", err) + } + t.Cleanup(c.stop) + + var serverLog bytes.Buffer + servers, err := newServers(opts.testName, *serverPath, cp.bootstrapContent, &serverLog, backendCount) + if err != nil { + t.Fatalf("failed to start server: %v", err) + } + t.Cleanup(func() { + for _, s := range servers { + s.stop() + } + }) + t.Cleanup(func() { + // TODO: find a better way to print the log. They are long, and hide the failure. 
+ t.Logf("\n----- client logs -----\n%v", clientLog.String()) + t.Logf("\n----- server logs -----\n%v", serverLog.String()) + }) + return cp, c, servers +} + +func TestPingPong(t *testing.T) { + const testName = "pingpong" + cp, c, _ := setup(t, testOpts{testName: testName}) + + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: testName, + NodeID: cp.nodeID, + Host: "localhost", + Port: serverPort, + SecLevel: e2e.SecurityLevelNone, + }) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := cp.server.Update(ctx, resources); err != nil { + t.Fatalf("failed to update control plane resources: %v", err) + } + + st, err := c.clientStats(ctx) + if err != nil { + t.Fatalf("failed to get client stats: %v", err) + } + if st.NumFailures != 0 { + t.Fatalf("Got %v failures: %+v", st.NumFailures, st) + } +} + +// TestAffinity covers the affinity tests with ringhash policy. +// - client is configured to use ringhash, with 3 backends +// - all RPCs will hash a specific metadata header +// - verify that +// - all RPCs with the same metadata value are sent to the same backend +// - only one backend is Ready +// +// - send more RPCs with different metadata values until a new backend is picked, and verify that +// - only two backends are in Ready +func TestAffinity(t *testing.T) { + const ( + testName = "affinity" + backendCount = 3 + testMDKey = "xds_md" + testMDValue = "unary_yranu" + ) + cp, c, servers := setup(t, testOpts{ + testName: testName, + backendCount: backendCount, + clientFlags: []string{"--rpc=EmptyCall", fmt.Sprintf("--metadata=EmptyCall:%s:%s", testMDKey, testMDValue)}, + }) + + resources := e2e.DefaultClientResources(e2e.ResourceParams{ + DialTarget: testName, + NodeID: cp.nodeID, + Host: "localhost", + Port: serverPort, + SecLevel: e2e.SecurityLevelNone, + }) + + // Update EDS to multiple backends. 
+ var ports []uint32 + for _, s := range servers { + ports = append(ports, uint32(s.port)) + } + edsMsg := resources.Endpoints[0] + resources.Endpoints[0] = e2e.DefaultEndpoint( + edsMsg.ClusterName, + "localhost", + ports, + ) + + // Update CDS lbpolicy to ringhash. + cdsMsg := resources.Clusters[0] + cdsMsg.LbPolicy = v3clusterpb.Cluster_RING_HASH + + // Update RDS to hash the header. + rdsMsg := resources.Routes[0] + rdsMsg.VirtualHosts[0].Routes[0].Action = &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: cdsMsg.Name}, + HashPolicy: []*v3routepb.RouteAction_HashPolicy{{ + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ + Header: &v3routepb.RouteAction_HashPolicy_Header{ + HeaderName: testMDKey, + }, + }, + }}, + }} + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := cp.server.Update(ctx, resources); err != nil { + t.Fatalf("failed to update control plane resources: %v", err) + } + + // Note: We can skip CSDS check because there's no long delay as in TD. + // + // The client stats check doesn't race with the xds resource update because + // there's only one version of xds resource, updated at the beginning of the + // test. So there's no need to retry the stats call. + // + // In the future, we may add tests that update xds in the middle. Then we + // either need to retry clientStats(), or make a CSDS check before so the + // result is stable. + + st, err := c.clientStats(ctx) + if err != nil { + t.Fatalf("failed to get client stats: %v", err) + } + if st.NumFailures != 0 { + t.Fatalf("Got %v failures: %+v", st.NumFailures, st) + } + if len(st.RpcsByPeer) != 1 { + t.Fatalf("more than 1 backends got traffic: %v, want 1", st.RpcsByPeer) + } + + // Call channelz to verify that only one subchannel is in state Ready. 
+	scs, err := c.channelzSubChannels(ctx)
+	if err != nil {
+		t.Fatalf("failed to fetch channelz: %v", err)
+	}
+	verifySubConnStates(t, scs, map[channelzpb.ChannelConnectivityState_State]int{
+		channelzpb.ChannelConnectivityState_READY: 1,
+		channelzpb.ChannelConnectivityState_IDLE:  2,
+	})
+
+	// Send Unary call with different metadata value with integers starting from
+	// 0. Stop when a second peer is picked.
+	var (
+		diffPeerPicked bool
+		mdValue        int
+	)
+	for !diffPeerPicked {
+		if err := c.configRPCs(ctx, &testpb.ClientConfigureRequest{
+			Types: []testpb.ClientConfigureRequest_RpcType{
+				testpb.ClientConfigureRequest_EMPTY_CALL,
+				testpb.ClientConfigureRequest_UNARY_CALL,
+			},
+			Metadata: []*testpb.ClientConfigureRequest_Metadata{
+				{Type: testpb.ClientConfigureRequest_EMPTY_CALL, Key: testMDKey, Value: testMDValue},
+				{Type: testpb.ClientConfigureRequest_UNARY_CALL, Key: testMDKey, Value: strconv.Itoa(mdValue)},
+			},
+		}); err != nil {
+			t.Fatalf("failed to configure RPC: %v", err)
+		}
+
+		st, err := c.clientStats(ctx)
+		if err != nil {
+			t.Fatalf("failed to get client stats: %v", err)
+		}
+		if st.NumFailures != 0 {
+			t.Fatalf("Got %v failures: %+v", st.NumFailures, st)
+		}
+		if len(st.RpcsByPeer) == 2 {
+			break
+		}
+
+		mdValue++
+	}
+
+	// Call channelz to verify that two subchannels are now in state Ready.
+	scs2, err := c.channelzSubChannels(ctx)
+	if err != nil {
+		t.Fatalf("failed to fetch channelz: %v", err)
+	}
+	verifySubConnStates(t, scs2, map[channelzpb.ChannelConnectivityState_State]int{
+		channelzpb.ChannelConnectivityState_READY: 2,
+		channelzpb.ChannelConnectivityState_IDLE:  1,
+	})
+}
diff --git a/xds/internal/test/e2e/e2e_utils.go b/xds/internal/test/e2e/e2e_utils.go
new file mode 100644
index 000000000000..34b0ee9eb092
--- /dev/null
+++ b/xds/internal/test/e2e/e2e_utils.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2e + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + channelzpb "google.golang.org/grpc/channelz/grpc_channelz_v1" +) + +func verifySubConnStates(t *testing.T, scs []*channelzpb.Subchannel, want map[channelzpb.ChannelConnectivityState_State]int) { + t.Helper() + var scStatsCount = map[channelzpb.ChannelConnectivityState_State]int{} + for _, sc := range scs { + scStatsCount[sc.Data.State.State]++ + } + if diff := cmp.Diff(scStatsCount, want); diff != "" { + t.Fatalf("got unexpected number of subchannels in state Ready, %v, scs: %v", diff, scs) + } +} diff --git a/xds/internal/test/e2e/run.sh b/xds/internal/test/e2e/run.sh new file mode 100755 index 000000000000..4363d6cbd94f --- /dev/null +++ b/xds/internal/test/e2e/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +mkdir binaries +go build -o ./binaries/client ../../../../interop/xds/client/ +go build -o ./binaries/server ../../../../interop/xds/server/ +go test . -v diff --git a/xds/internal/test/xds_client_integration_test.go b/xds/internal/test/xds_client_integration_test.go deleted file mode 100644 index f97e42af2a0a..000000000000 --- a/xds/internal/test/xds_client_integration_test.go +++ /dev/null @@ -1,112 +0,0 @@ -// +build !386 - -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xds_test - -import ( - "context" - "net" - "testing" - - "github.com/google/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/internal/xds" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/e2e" - - testpb "google.golang.org/grpc/test/grpc_testing" -) - -// clientSetup performs a bunch of steps common to all xDS client tests here: -// - spin up an xDS management server on a local port -// - spin up a gRPC server and register the test service on it -// - create a local TCP listener and start serving on it -// -// Returns the following: -// - the management server: tests use this to configure resources -// - nodeID expected by the management server: this is set in the Node proto -// sent by the xdsClient for queries. -// - the port the server is listening on -// - cleanup function to be invoked by the tests when done -func clientSetup(t *testing.T) (*e2e.ManagementServer, string, uint32, func()) { - // Spin up a xDS management server on a local port. - nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer() - if err != nil { - t.Fatal(err) - } - - // Create a bootstrap file in a temporary directory. - bootstrapCleanup, err := xds.SetupBootstrapFile(xds.BootstrapOptions{ - Version: xds.TransportV3, - NodeID: nodeID, - ServerURI: fs.Address, - }) - if err != nil { - t.Fatal(err) - } - - // Initialize a gRPC server and register the stubServer on it. 
- server := grpc.NewServer() - testpb.RegisterTestServiceServer(server, &testService{}) - - // Create a local listener and pass it to Serve(). - lis, err := testutils.LocalTCPListener() - if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) - } - - go func() { - if err := server.Serve(lis); err != nil { - t.Errorf("Serve() failed: %v", err) - } - }() - - return fs, nodeID, uint32(lis.Addr().(*net.TCPAddr).Port), func() { - fs.Stop() - bootstrapCleanup() - server.Stop() - } -} - -func (s) TestClientSideXDS(t *testing.T) { - fs, nodeID, port, cleanup := clientSetup(t) - defer cleanup() - - resources := e2e.DefaultClientResources("myservice", nodeID, "localhost", port) - if err := fs.Update(resources); err != nil { - t.Fatal(err) - } - - // Create a ClientConn and make a successful RPC. - cc, err := grpc.Dial("xds:///myservice", grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() - - client := testpb.NewTestServiceClient(cc) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } -} diff --git a/xds/internal/test/xds_server_integration_test.go b/xds/internal/test/xds_server_integration_test.go deleted file mode 100644 index f18d4e356aa6..000000000000 --- a/xds/internal/test/xds_server_integration_test.go +++ /dev/null @@ -1,403 +0,0 @@ -// +build !386 - -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package xds_test contains e2e tests for xDS use. -package xds_test - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "strconv" - "testing" - - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "github.com/google/uuid" - xds2 "google.golang.org/grpc/internal/xds" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/credentials/insecure" - xdscreds "google.golang.org/grpc/credentials/xds" - "google.golang.org/grpc/status" - testpb "google.golang.org/grpc/test/grpc_testing" - "google.golang.org/grpc/testdata" - "google.golang.org/grpc/xds" - "google.golang.org/grpc/xds/internal/testutils" - "google.golang.org/grpc/xds/internal/testutils/e2e" - "google.golang.org/grpc/xds/internal/version" -) - -const ( - // Names of files inside tempdir, for certprovider plugin to watch. - certFile = "cert.pem" - keyFile = "key.pem" - rootFile = "ca.pem" - - // Template for server Listener resource name. 
- serverListenerResourceNameTemplate = "grpc/server?xds.resource.listening_address=%s" -) - -func createTmpFile(t *testing.T, src, dst string) { - t.Helper() - - data, err := ioutil.ReadFile(src) - if err != nil { - t.Fatalf("ioutil.ReadFile(%q) failed: %v", src, err) - } - if err := ioutil.WriteFile(dst, data, os.ModePerm); err != nil { - t.Fatalf("ioutil.WriteFile(%q) failed: %v", dst, err) - } - t.Logf("Wrote file at: %s", dst) - t.Logf("%s", string(data)) -} - -// createTempDirWithFiles creates a temporary directory under the system default -// tempDir with the given dirSuffix. It also reads from certSrc, keySrc and -// rootSrc files are creates appropriate files under the newly create tempDir. -// Returns the name of the created tempDir. -func createTmpDirWithFiles(t *testing.T, dirSuffix, certSrc, keySrc, rootSrc string) string { - t.Helper() - - // Create a temp directory. Passing an empty string for the first argument - // uses the system temp directory. - dir, err := ioutil.TempDir("", dirSuffix) - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - t.Logf("Using tmpdir: %s", dir) - - createTmpFile(t, testdata.Path(certSrc), path.Join(dir, certFile)) - createTmpFile(t, testdata.Path(keySrc), path.Join(dir, keyFile)) - createTmpFile(t, testdata.Path(rootSrc), path.Join(dir, rootFile)) - return dir -} - -// createClientTLSCredentials creates client-side TLS transport credentials. 
-func createClientTLSCredentials(t *testing.T) credentials.TransportCredentials { - cert, err := tls.LoadX509KeyPair(testdata.Path("x509/client1_cert.pem"), testdata.Path("x509/client1_key.pem")) - if err != nil { - t.Fatalf("tls.LoadX509KeyPair(x509/client1_cert.pem, x509/client1_key.pem) failed: %v", err) - } - b, err := ioutil.ReadFile(testdata.Path("x509/server_ca_cert.pem")) - if err != nil { - t.Fatalf("ioutil.ReadFile(x509/server_ca_cert.pem) failed: %v", err) - } - roots := x509.NewCertPool() - if !roots.AppendCertsFromPEM(b) { - t.Fatal("failed to append certificates") - } - return credentials.NewTLS(&tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: roots, - ServerName: "x.test.example.com", - }) -} - -// commonSetup performs a bunch of steps common to all xDS server tests here: -// - spin up an xDS management server on a local port -// - set up certificates for consumption by the file_watcher plugin -// - spin up an xDS-enabled gRPC server, configure it with xdsCredentials and -// register the test service on it -// - create a local TCP listener and start serving on it -// -// Returns the following: -// - the management server: tests use this to configure resources -// - nodeID expected by the management server: this is set in the Node proto -// sent by the xdsClient used on the xDS-enabled gRPC server -// - local listener on which the xDS-enabled gRPC server is serving on -// - cleanup function to be invoked by the tests when done -func commonSetup(t *testing.T) (*e2e.ManagementServer, string, net.Listener, func()) { - t.Helper() - - // Spin up a xDS management server on a local port. - nodeID := uuid.New().String() - fs, err := e2e.StartManagementServer() - if err != nil { - t.Fatal(err) - } - - // Create certificate and key files in a temporary directory and generate - // certificate provider configuration for a file_watcher plugin. 
- tmpdir := createTmpDirWithFiles(t, "testServerSideXDS*", "x509/server1_cert.pem", "x509/server1_key.pem", "x509/client_ca_cert.pem") - cpc := e2e.DefaultFileWatcherConfig(path.Join(tmpdir, certFile), path.Join(tmpdir, keyFile), path.Join(tmpdir, rootFile)) - - // Create a bootstrap file in a temporary directory. - bootstrapCleanup, err := xds2.SetupBootstrapFile(xds2.BootstrapOptions{ - Version: xds2.TransportV3, - NodeID: nodeID, - ServerURI: fs.Address, - CertificateProviders: cpc, - ServerListenerResourceNameTemplate: serverListenerResourceNameTemplate, - }) - if err != nil { - t.Fatal(err) - } - - // Configure xDS credentials to be used on the server-side. - creds, err := xdscreds.NewServerCredentials(xdscreds.ServerOptions{ - FallbackCreds: insecure.NewCredentials(), - }) - if err != nil { - t.Fatal(err) - } - - // Initialize an xDS-enabled gRPC server and register the stubServer on it. - server := xds.NewGRPCServer(grpc.Creds(creds)) - testpb.RegisterTestServiceServer(server, &testService{}) - - // Create a local listener and pass it to Serve(). - lis, err := testutils.LocalTCPListener() - if err != nil { - t.Fatalf("testutils.LocalTCPListener() failed: %v", err) - } - - go func() { - if err := server.Serve(lis); err != nil { - t.Errorf("Serve() failed: %v", err) - } - }() - - return fs, nodeID, lis, func() { - fs.Stop() - bootstrapCleanup() - server.Stop() - } -} - -func hostPortFromListener(t *testing.T, lis net.Listener) (string, uint32) { - t.Helper() - - host, p, err := net.SplitHostPort(lis.Addr().String()) - if err != nil { - t.Fatalf("net.SplitHostPort(%s) failed: %v", lis.Addr().String(), err) - } - port, err := strconv.ParseInt(p, 10, 32) - if err != nil { - t.Fatalf("strconv.ParseInt(%s, 10, 32) failed: %v", p, err) - } - return host, uint32(port) - -} - -// listenerResourceWithoutSecurityConfig returns a listener resource with no -// security configuration, and name and address fields matching the passed in -// net.Listener. 
-func listenerResourceWithoutSecurityConfig(t *testing.T, lis net.Listener) *v3listenerpb.Listener { - host, port := hostPortFromListener(t, lis) - return &v3listenerpb.Listener{ - // This needs to match the name we are querying for. - Name: fmt.Sprintf(serverListenerResourceNameTemplate, lis.Addr().String()), - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: host, - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: port, - }, - }, - }, - }, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - }, - }, - } -} - -// listenerResourceWithSecurityConfig returns a listener resource with security -// configuration pointing to the use of the file_watcher certificate provider -// plugin, and name and address fields matching the passed in net.Listener. -func listenerResourceWithSecurityConfig(t *testing.T, lis net.Listener) *v3listenerpb.Listener { - host, port := hostPortFromListener(t, lis) - return &v3listenerpb.Listener{ - // This needs to match the name we are querying for. 
- Name: fmt.Sprintf(serverListenerResourceNameTemplate, lis.Addr().String()), - Address: &v3corepb.Address{ - Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Address: host, - PortSpecifier: &v3corepb.SocketAddress_PortValue{ - PortValue: port, - }}}}, - FilterChains: []*v3listenerpb.FilterChain{ - { - Name: "filter-chain-1", - TransportSocket: &v3corepb.TransportSocket{ - Name: "envoy.transport_sockets.tls", - ConfigType: &v3corepb.TransportSocket_TypedConfig{ - TypedConfig: &anypb.Any{ - TypeUrl: version.V3DownstreamTLSContextURL, - Value: func() []byte { - tls := &v3tlspb.DownstreamTlsContext{ - RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, - CommonTlsContext: &v3tlspb.CommonTlsContext{ - TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "google_cloud_private_spiffe", - }, - ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ - ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ - InstanceName: "google_cloud_private_spiffe", - }}}} - mtls, _ := proto.Marshal(tls) - return mtls - }(), - }}}}}, - } -} - -// TestServerSideXDS_Fallback is an e2e test where xDS is enabled on the -// server-side and xdsCredentials are configured for security. The control plane -// does not provide any security configuration and therefore the xdsCredentials -// uses fallback credentials, which in this case is insecure creds. -func (s) TestServerSideXDS_Fallback(t *testing.T) { - fs, nodeID, lis, cleanup := commonSetup(t) - defer cleanup() - - // Setup the fake management server to respond with a Listener resource that - // does not contain any security configuration. This should force the - // server-side xdsCredentials to use fallback. 
- listener := listenerResourceWithoutSecurityConfig(t, lis) - if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{listener}, - }); err != nil { - t.Error(err) - } - - // Create a ClientConn and make a successful RPC. - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - cc, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() - - client := testpb.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } -} - -// TestServerSideXDS_FileWatcherCerts is an e2e test where xDS is enabled on the -// server-side and xdsCredentials are configured for security. The control plane -// sends security configuration pointing to the use of the file_watcher plugin, -// and we verify that a client connecting with TLS creds is able to successfully -// make an RPC. -func (s) TestServerSideXDS_FileWatcherCerts(t *testing.T) { - fs, nodeID, lis, cleanup := commonSetup(t) - defer cleanup() - - // Setup the fake management server to respond with a Listener resource with - // security configuration pointing to the file watcher plugin and requiring - // mTLS. - listener := listenerResourceWithSecurityConfig(t, lis) - if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{listener}, - }); err != nil { - t.Error(err) - } - - // Create a ClientConn with TLS creds and make a successful RPC. 
- clientCreds := createClientTLSCredentials(t) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - cc, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithTransportCredentials(clientCreds)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() - - client := testpb.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } -} - -// TestServerSideXDS_SecurityConfigChange is an e2e test where xDS is enabled on -// the server-side and xdsCredentials are configured for security. The control -// plane initially does not any security configuration. This forces the -// xdsCredentials to use fallback creds, which is this case is insecure creds. -// We verify that a client connecting with TLS creds is not able to successfully -// make an RPC. The control plan then sends a listener resource with security -// configuration pointing to the use of the file_watcher plugin and we verify -// that the same client is now able to successfully make an RPC. -func (s) TestServerSideXDS_SecurityConfigChange(t *testing.T) { - fs, nodeID, lis, cleanup := commonSetup(t) - defer cleanup() - - // Setup the fake management server to respond with a Listener resource that - // does not contain any security configuration. This should force the - // server-side xdsCredentials to use fallback. - listener := listenerResourceWithoutSecurityConfig(t, lis) - if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{listener}, - }); err != nil { - t.Error(err) - } - - // Create a ClientConn with TLS creds. This should fail since the server is - // using fallback credentials which in this case in insecure creds. 
- clientCreds := createClientTLSCredentials(t) - ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) - defer cancel() - cc, err := grpc.DialContext(ctx, lis.Addr().String(), grpc.WithTransportCredentials(clientCreds)) - if err != nil { - t.Fatalf("failed to dial local test server: %v", err) - } - defer cc.Close() - - // We don't set 'waitForReady` here since we want this call to failfast. - client := testpb.NewTestServiceClient(cc) - if _, err := client.EmptyCall(ctx, &testpb.Empty{}); status.Convert(err).Code() != codes.Unavailable { - t.Fatal("rpc EmptyCall() succeeded when expected to fail") - } - - // Setup the fake management server to respond with a Listener resource with - // security configuration pointing to the file watcher plugin and requiring - // mTLS. - listener = listenerResourceWithSecurityConfig(t, lis) - if err := fs.Update(e2e.UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{listener}, - }); err != nil { - t.Error(err) - } - - // Make another RPC with `waitForReady` set and expect this to succeed. 
- if _, err := client.EmptyCall(ctx, &testpb.Empty{}, grpc.WaitForReady(true)); err != nil { - t.Fatalf("rpc EmptyCall() failed: %v", err) - } -} diff --git a/xds/internal/testutils/balancer_test.go b/xds/internal/testutils/balancer_test.go index 4891eb9cdadf..b5f7f665396c 100644 --- a/xds/internal/testutils/balancer_test.go +++ b/xds/internal/testutils/balancer_test.go @@ -22,13 +22,14 @@ import ( "testing" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/testutils" ) func TestIsRoundRobin(t *testing.T) { var ( - sc1 = TestSubConns[0] - sc2 = TestSubConns[1] - sc3 = TestSubConns[2] + sc1 = testutils.TestSubConns[0] + sc2 = testutils.TestSubConns[1] + sc3 = testutils.TestSubConns[2] ) testCases := []struct { @@ -125,10 +126,22 @@ func TestIsRoundRobin(t *testing.T) { } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { - err := IsRoundRobin(tC.want, (&testClosure{r: tC.got}).next) + err := testutils.IsRoundRobin(tC.want, (&testClosure{r: tC.got}).next) if err == nil != tC.pass { t.Errorf("want pass %v, want %v, got err %v", tC.pass, tC.want, err) } }) } } + +// testClosure is a test util for TestIsRoundRobin. +type testClosure struct { + r []balancer.SubConn + i int +} + +func (tc *testClosure) next() balancer.SubConn { + ret := tc.r[tc.i] + tc.i = (tc.i + 1) % len(tc.r) + return ret +} diff --git a/xds/internal/testutils/e2e/clientresources.go b/xds/internal/testutils/e2e/clientresources.go deleted file mode 100644 index 79424b13b918..000000000000 --- a/xds/internal/testutils/e2e/clientresources.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package e2e - -import ( - "github.com/envoyproxy/go-control-plane/pkg/wellknown" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" - v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" - anypb "github.com/golang/protobuf/ptypes/any" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" -) - -func any(m proto.Message) *anypb.Any { - a, err := ptypes.MarshalAny(m) - if err != nil { - panic("error marshalling any: " + err.Error()) - } - return a -} - -// DefaultClientResources returns a set of resources (LDS, RDS, CDS, EDS) for a -// client to generically connect to one server. 
-func DefaultClientResources(target, nodeID, host string, port uint32) UpdateOptions { - const routeConfigName = "route" - const clusterName = "cluster" - const endpointsName = "endpoints" - - return UpdateOptions{ - NodeID: nodeID, - Listeners: []*v3listenerpb.Listener{DefaultListener(target, routeConfigName)}, - Routes: []*v3routepb.RouteConfiguration{DefaultRouteConfig(routeConfigName, target, clusterName)}, - Clusters: []*v3clusterpb.Cluster{DefaultCluster(clusterName, endpointsName)}, - Endpoints: []*v3endpointpb.ClusterLoadAssignment{DefaultEndpoint(endpointsName, host, port)}, - } -} - -// DefaultListener returns a basic xds Listener resource. -func DefaultListener(target, routeName string) *v3listenerpb.Listener { - hcm := any(&v3httppb.HttpConnectionManager{ - RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{Rds: &v3httppb.Rds{ - ConfigSource: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, - }, - RouteConfigName: routeName, - }}, - HttpFilters: []*v3httppb.HttpFilter{HTTPFilter("router", &v3routerpb.Router{})}, // router fields are unused by grpc - }) - return &v3listenerpb.Listener{ - Name: target, - ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, - FilterChains: []*v3listenerpb.FilterChain{{ - Name: "filter-chain-name", - Filters: []*v3listenerpb.Filter{{ - Name: wellknown.HTTPConnectionManager, - ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, - }}, - }}, - } -} - -// HTTPFilter constructs an xds HttpFilter with the provided name and config. -func HTTPFilter(name string, config proto.Message) *v3httppb.HttpFilter { - return &v3httppb.HttpFilter{ - Name: name, - ConfigType: &v3httppb.HttpFilter_TypedConfig{ - TypedConfig: any(config), - }, - } -} - -// DefaultRouteConfig returns a basic xds RouteConfig resource. 
-func DefaultRouteConfig(routeName, ldsTarget, clusterName string) *v3routepb.RouteConfiguration { - return &v3routepb.RouteConfiguration{ - Name: routeName, - VirtualHosts: []*v3routepb.VirtualHost{{ - Domains: []string{ldsTarget}, - Routes: []*v3routepb.Route{{ - Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, - Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ - ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, - }}, - }}, - }}, - } -} - -// DefaultCluster returns a basic xds Cluster resource. -func DefaultCluster(clusterName, edsServiceName string) *v3clusterpb.Cluster { - return &v3clusterpb.Cluster{ - Name: clusterName, - ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, - EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ - EdsConfig: &v3corepb.ConfigSource{ - ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ - Ads: &v3corepb.AggregatedConfigSource{}, - }, - }, - ServiceName: edsServiceName, - }, - LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, - } -} - -// DefaultEndpoint returns a basic xds Endpoint resource. 
-func DefaultEndpoint(clusterName string, host string, port uint32) *v3endpointpb.ClusterLoadAssignment { - return &v3endpointpb.ClusterLoadAssignment{ - ClusterName: clusterName, - Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ - Locality: &v3corepb.Locality{SubZone: "subzone"}, - LbEndpoints: []*v3endpointpb.LbEndpoint{{ - HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{Endpoint: &v3endpointpb.Endpoint{ - Address: &v3corepb.Address{Address: &v3corepb.Address_SocketAddress{ - SocketAddress: &v3corepb.SocketAddress{ - Protocol: v3corepb.SocketAddress_TCP, - Address: host, - PortSpecifier: &v3corepb.SocketAddress_PortValue{PortValue: uint32(port)}}, - }}, - }}, - }}, - LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, - Priority: 0, - }}, - } -} diff --git a/xds/internal/testutils/e2e/server.go b/xds/internal/testutils/e2e/server.go deleted file mode 100644 index 9ec2eb0d6f2e..000000000000 --- a/xds/internal/testutils/e2e/server.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package e2e provides utilities for end2end testing of xDS functionality. 
-package e2e - -import ( - "context" - "fmt" - "net" - "reflect" - "strconv" - - v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" - v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" - v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" - v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" - v3discoverygrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" - "github.com/envoyproxy/go-control-plane/pkg/cache/types" - v3cache "github.com/envoyproxy/go-control-plane/pkg/cache/v3" - v3server "github.com/envoyproxy/go-control-plane/pkg/server/v3" - - "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" -) - -var logger = grpclog.Component("xds-e2e") - -// serverLogger implements the Logger interface defined at -// envoyproxy/go-control-plane/pkg/log. This is passed to the Snapshot cache. -type serverLogger struct{} - -func (l serverLogger) Debugf(format string, args ...interface{}) { logger.Infof(format, args...) } -func (l serverLogger) Infof(format string, args ...interface{}) { logger.Infof(format, args...) } -func (l serverLogger) Warnf(format string, args ...interface{}) { logger.Warningf(format, args...) } -func (l serverLogger) Errorf(format string, args ...interface{}) { logger.Errorf(format, args...) } - -// ManagementServer is a thin wrapper around the xDS control plane -// implementation provided by envoyproxy/go-control-plane. -type ManagementServer struct { - // Address is the host:port on which the management server is listening for - // new connections. - Address string - - cancel context.CancelFunc // To stop the v3 ADS service. - xs v3server.Server // v3 implementation of ADS. - gs *grpc.Server // gRPC server which exports the ADS service. - cache v3cache.SnapshotCache // Resource snapshot. 
- version int // Version of resource snapshot. -} - -// StartManagementServer initializes a management server which implements the -// AggregatedDiscoveryService endpoint. The management server is initialized -// with no resources. Tests should call the Update() method to change the -// resource snapshot held by the management server, as required by the test -// logic. When the test is done, it should call the Stop() method to cleanup -// resources allocated by the management server. -func StartManagementServer() (*ManagementServer, error) { - // Create a snapshot cache. - cache := v3cache.NewSnapshotCache(true, v3cache.IDHash{}, serverLogger{}) - logger.Infof("Created new snapshot cache...") - - lis, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, fmt.Errorf("failed to start xDS management server: %v", err) - } - - // Create an xDS management server and register the ADS implementation - // provided by it on a gRPC server. Cancelling the context passed to the - // server is the only way of stopping it at the end of the test. - ctx, cancel := context.WithCancel(context.Background()) - xs := v3server.NewServer(ctx, cache, v3server.CallbackFuncs{}) - gs := grpc.NewServer() - v3discoverygrpc.RegisterAggregatedDiscoveryServiceServer(gs, xs) - logger.Infof("Registered Aggregated Discovery Service (ADS)...") - - // Start serving. - go gs.Serve(lis) - logger.Infof("xDS management server serving at: %v...", lis.Addr().String()) - - return &ManagementServer{ - Address: lis.Addr().String(), - cancel: cancel, - version: 0, - gs: gs, - xs: xs, - cache: cache, - }, nil -} - -// UpdateOptions wraps parameters to be passed to the Update() method. -type UpdateOptions struct { - // NodeID is the id of the client to which this update is to be pushed. - NodeID string - // Endpoints, Clusters, Routes, and Listeners are the updated list of xds - // resources for the server. All must be provided with each Update. 
- Endpoints []*v3endpointpb.ClusterLoadAssignment - Clusters []*v3clusterpb.Cluster - Routes []*v3routepb.RouteConfiguration - Listeners []*v3listenerpb.Listener - // SkipValidation indicates whether we want to skip validation (by not - // calling snapshot.Consistent()). It can be useful for negative tests, - // where we send updates that the client will NACK. - SkipValidation bool -} - -// Update changes the resource snapshot held by the management server, which -// updates connected clients as required. -func (s *ManagementServer) Update(opts UpdateOptions) error { - s.version++ - - // Create a snapshot with the passed in resources. - snapshot := v3cache.NewSnapshot(strconv.Itoa(s.version), resourceSlice(opts.Endpoints), resourceSlice(opts.Clusters), resourceSlice(opts.Routes), resourceSlice(opts.Listeners), nil /*runtimes*/, nil /*secrets*/) - if !opts.SkipValidation { - if err := snapshot.Consistent(); err != nil { - return fmt.Errorf("failed to create new resource snapshot: %v", err) - } - } - logger.Infof("Created new resource snapshot...") - - // Update the cache with the new resource snapshot. - if err := s.cache.SetSnapshot(opts.NodeID, snapshot); err != nil { - return fmt.Errorf("failed to update resource snapshot in management server: %v", err) - } - logger.Infof("Updated snapshot cache with resource snapshot...") - return nil -} - -// Stop stops the management server. -func (s *ManagementServer) Stop() { - if s.cancel != nil { - s.cancel() - } - s.gs.Stop() - logger.Infof("Stopped the xDS management server...") -} - -// resourceSlice accepts a slice of any type of proto messages and returns a -// slice of types.Resource. Will panic if there is an input type mismatch. 
-func resourceSlice(i interface{}) []types.Resource { - v := reflect.ValueOf(i) - rs := make([]types.Resource, v.Len()) - for i := 0; i < v.Len(); i++ { - rs[i] = v.Index(i).Interface().(types.Resource) - } - return rs -} diff --git a/xds/internal/testutils/fakeclient/client.go b/xds/internal/testutils/fakeclient/client.go index 0978125b8aeb..9794425c501f 100644 --- a/xds/internal/testutils/fakeclient/client.go +++ b/xds/internal/testutils/fakeclient/client.go @@ -23,14 +23,20 @@ import ( "context" "google.golang.org/grpc/internal/testutils" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" - "google.golang.org/grpc/xds/internal/client/load" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) // Client is a fake implementation of an xds client. It exposes a bunch of // channels to signal the occurrence of various events. type Client struct { + // Embed XDSClient so this fake client implements the interface, but it's + // never set (it's always nil). This may cause nil panic since not all the + // methods are implemented. 
+ xdsclient.XDSClient + name string ldsWatchCh *testutils.Channel rdsWatchCh *testutils.Channel @@ -41,18 +47,18 @@ type Client struct { cdsCancelCh *testutils.Channel edsCancelCh *testutils.Channel loadReportCh *testutils.Channel - closeCh *testutils.Channel + lrsCancelCh *testutils.Channel loadStore *load.Store bootstrapCfg *bootstrap.Config - ldsCb func(xdsclient.ListenerUpdate, error) - rdsCb func(xdsclient.RouteConfigUpdate, error) - cdsCb func(xdsclient.ClusterUpdate, error) - edsCb func(xdsclient.EndpointsUpdate, error) + ldsCb func(xdsresource.ListenerUpdate, error) + rdsCbs map[string]func(xdsresource.RouteConfigUpdate, error) + cdsCbs map[string]func(xdsresource.ClusterUpdate, error) + edsCbs map[string]func(xdsresource.EndpointsUpdate, error) } // WatchListener registers a LDS watch. -func (xdsC *Client) WatchListener(serviceName string, callback func(xdsclient.ListenerUpdate, error)) func() { +func (xdsC *Client) WatchListener(serviceName string, callback func(xdsresource.ListenerUpdate, error)) func() { xdsC.ldsCb = callback xdsC.ldsWatchCh.Send(serviceName) return func() { @@ -74,7 +80,7 @@ func (xdsC *Client) WaitForWatchListener(ctx context.Context) (string, error) { // // Not thread safe with WatchListener. Only call this after // WaitForWatchListener. -func (xdsC *Client) InvokeWatchListenerCallback(update xdsclient.ListenerUpdate, err error) { +func (xdsC *Client) InvokeWatchListenerCallback(update xdsresource.ListenerUpdate, err error) { xdsC.ldsCb(update, err) } @@ -86,11 +92,11 @@ func (xdsC *Client) WaitForCancelListenerWatch(ctx context.Context) error { } // WatchRouteConfig registers a RDS watch. 
-func (xdsC *Client) WatchRouteConfig(routeName string, callback func(xdsclient.RouteConfigUpdate, error)) func() { - xdsC.rdsCb = callback +func (xdsC *Client) WatchRouteConfig(routeName string, callback func(xdsresource.RouteConfigUpdate, error)) func() { + xdsC.rdsCbs[routeName] = callback xdsC.rdsWatchCh.Send(routeName) return func() { - xdsC.rdsCancelCh.Send(nil) + xdsC.rdsCancelCh.Send(routeName) } } @@ -108,23 +114,39 @@ func (xdsC *Client) WaitForWatchRouteConfig(ctx context.Context) (string, error) // // Not thread safe with WatchRouteConfig. Only call this after // WaitForWatchRouteConfig. -func (xdsC *Client) InvokeWatchRouteConfigCallback(update xdsclient.RouteConfigUpdate, err error) { - xdsC.rdsCb(update, err) +func (xdsC *Client) InvokeWatchRouteConfigCallback(name string, update xdsresource.RouteConfigUpdate, err error) { + if len(xdsC.rdsCbs) != 1 { + xdsC.rdsCbs[name](update, err) + return + } + // Keeps functionality with previous usage of this on client side, if single + // callback call that callback. + var routeName string + for route := range xdsC.rdsCbs { + routeName = route + } + xdsC.rdsCbs[routeName](update, err) } // WaitForCancelRouteConfigWatch waits for a RDS watch to be cancelled and returns // context.DeadlineExceeded otherwise. -func (xdsC *Client) WaitForCancelRouteConfigWatch(ctx context.Context) error { - _, err := xdsC.rdsCancelCh.Receive(ctx) - return err +func (xdsC *Client) WaitForCancelRouteConfigWatch(ctx context.Context) (string, error) { + val, err := xdsC.rdsCancelCh.Receive(ctx) + if err != nil { + return "", err + } + return val.(string), err } // WatchCluster registers a CDS watch. 
-func (xdsC *Client) WatchCluster(clusterName string, callback func(xdsclient.ClusterUpdate, error)) func() { - xdsC.cdsCb = callback +func (xdsC *Client) WatchCluster(clusterName string, callback func(xdsresource.ClusterUpdate, error)) func() { + // Due to the tree like structure of aggregate clusters, there can be multiple callbacks persisted for each cluster + // node. However, the client doesn't care about the parent child relationship between the nodes, only that it invokes + // the right callback for a particular cluster. + xdsC.cdsCbs[clusterName] = callback xdsC.cdsWatchCh.Send(clusterName) return func() { - xdsC.cdsCancelCh.Send(nil) + xdsC.cdsCancelCh.Send(clusterName) } } @@ -142,23 +164,37 @@ func (xdsC *Client) WaitForWatchCluster(ctx context.Context) (string, error) { // // Not thread safe with WatchCluster. Only call this after // WaitForWatchCluster. -func (xdsC *Client) InvokeWatchClusterCallback(update xdsclient.ClusterUpdate, err error) { - xdsC.cdsCb(update, err) +func (xdsC *Client) InvokeWatchClusterCallback(update xdsresource.ClusterUpdate, err error) { + // Keeps functionality with previous usage of this, if single callback call that callback. + if len(xdsC.cdsCbs) == 1 { + var clusterName string + for cluster := range xdsC.cdsCbs { + clusterName = cluster + } + xdsC.cdsCbs[clusterName](update, err) + } else { + // Have what callback you call with the update determined by the service name in the ClusterUpdate. Left up to the + // caller to make sure the cluster update matches with a persisted callback. + xdsC.cdsCbs[update.ClusterName](update, err) + } } // WaitForCancelClusterWatch waits for a CDS watch to be cancelled and returns // context.DeadlineExceeded otherwise. 
-func (xdsC *Client) WaitForCancelClusterWatch(ctx context.Context) error { - _, err := xdsC.cdsCancelCh.Receive(ctx) - return err +func (xdsC *Client) WaitForCancelClusterWatch(ctx context.Context) (string, error) { + clusterNameReceived, err := xdsC.cdsCancelCh.Receive(ctx) + if err != nil { + return "", err + } + return clusterNameReceived.(string), err } // WatchEndpoints registers an EDS watch for provided clusterName. -func (xdsC *Client) WatchEndpoints(clusterName string, callback func(xdsclient.EndpointsUpdate, error)) (cancel func()) { - xdsC.edsCb = callback +func (xdsC *Client) WatchEndpoints(clusterName string, callback func(xdsresource.EndpointsUpdate, error)) (cancel func()) { + xdsC.edsCbs[clusterName] = callback xdsC.edsWatchCh.Send(clusterName) return func() { - xdsC.edsCancelCh.Send(nil) + xdsC.edsCancelCh.Send(clusterName) } } @@ -176,27 +212,49 @@ func (xdsC *Client) WaitForWatchEDS(ctx context.Context) (string, error) { // // Not thread safe with WatchEndpoints. Only call this after // WaitForWatchEDS. -func (xdsC *Client) InvokeWatchEDSCallback(update xdsclient.EndpointsUpdate, err error) { - xdsC.edsCb(update, err) +func (xdsC *Client) InvokeWatchEDSCallback(name string, update xdsresource.EndpointsUpdate, err error) { + if len(xdsC.edsCbs) != 1 { + // This may panic if name isn't found. But it's fine for tests. + xdsC.edsCbs[name](update, err) + return + } + // Keeps functionality with previous usage of this, if single callback call + // that callback. + for n := range xdsC.edsCbs { + name = n + } + xdsC.edsCbs[name](update, err) } // WaitForCancelEDSWatch waits for a EDS watch to be cancelled and returns // context.DeadlineExceeded otherwise. 
-func (xdsC *Client) WaitForCancelEDSWatch(ctx context.Context) error { - _, err := xdsC.edsCancelCh.Receive(ctx) - return err +func (xdsC *Client) WaitForCancelEDSWatch(ctx context.Context) (string, error) { + edsNameReceived, err := xdsC.edsCancelCh.Receive(ctx) + if err != nil { + return "", err + } + return edsNameReceived.(string), err } // ReportLoadArgs wraps the arguments passed to ReportLoad. type ReportLoadArgs struct { // Server is the name of the server to which the load is reported. - Server string + Server *bootstrap.ServerConfig } // ReportLoad starts reporting load about clusterName to server. -func (xdsC *Client) ReportLoad(server string) (loadStore *load.Store, cancel func()) { +func (xdsC *Client) ReportLoad(server *bootstrap.ServerConfig) (loadStore *load.Store, cancel func()) { xdsC.loadReportCh.Send(ReportLoadArgs{Server: server}) - return xdsC.loadStore, func() {} + return xdsC.loadStore, func() { + xdsC.lrsCancelCh.Send(nil) + } +} + +// WaitForCancelReportLoad waits for a load report to be cancelled and returns +// context.DeadlineExceeded otherwise. +func (xdsC *Client) WaitForCancelReportLoad(ctx context.Context) error { + _, err := xdsC.lrsCancelCh.Receive(ctx) + return err } // LoadStore returns the underlying load data store. @@ -208,19 +266,10 @@ func (xdsC *Client) LoadStore() *load.Store { // returns the arguments passed to it. func (xdsC *Client) WaitForReportLoad(ctx context.Context) (ReportLoadArgs, error) { val, err := xdsC.loadReportCh.Receive(ctx) - return val.(ReportLoadArgs), err -} - -// Close closes the xds client. -func (xdsC *Client) Close() { - xdsC.closeCh.Send(nil) -} - -// WaitForClose waits for Close to be invoked on this client and returns -// context.DeadlineExceeded otherwise. 
-func (xdsC *Client) WaitForClose(ctx context.Context) error { - _, err := xdsC.closeCh.Receive(ctx) - return err + if err != nil { + return ReportLoadArgs{}, err + } + return val.(ReportLoadArgs), nil } // BootstrapConfig returns the bootstrap config. @@ -249,16 +298,20 @@ func NewClient() *Client { func NewClientWithName(name string) *Client { return &Client{ name: name, - ldsWatchCh: testutils.NewChannel(), - rdsWatchCh: testutils.NewChannel(), - cdsWatchCh: testutils.NewChannel(), - edsWatchCh: testutils.NewChannel(), - ldsCancelCh: testutils.NewChannel(), - rdsCancelCh: testutils.NewChannel(), - cdsCancelCh: testutils.NewChannel(), - edsCancelCh: testutils.NewChannel(), + ldsWatchCh: testutils.NewChannelWithSize(10), + rdsWatchCh: testutils.NewChannelWithSize(10), + cdsWatchCh: testutils.NewChannelWithSize(10), + edsWatchCh: testutils.NewChannelWithSize(10), + ldsCancelCh: testutils.NewChannelWithSize(10), + rdsCancelCh: testutils.NewChannelWithSize(10), + cdsCancelCh: testutils.NewChannelWithSize(10), + edsCancelCh: testutils.NewChannelWithSize(10), loadReportCh: testutils.NewChannel(), - closeCh: testutils.NewChannel(), + lrsCancelCh: testutils.NewChannel(), loadStore: load.NewStore(), + bootstrapCfg: &bootstrap.Config{ClientDefaultListenerResourceNameTemplate: "%s"}, + rdsCbs: make(map[string]func(xdsresource.RouteConfigUpdate, error)), + cdsCbs: make(map[string]func(xdsresource.ClusterUpdate, error)), + edsCbs: make(map[string]func(xdsresource.EndpointsUpdate, error)), } } diff --git a/xds/internal/testutils/protos.go b/xds/internal/testutils/protos.go index e0dba0e2b301..fc3cdf307fcd 100644 --- a/xds/internal/testutils/protos.go +++ b/xds/internal/testutils/protos.go @@ -59,7 +59,7 @@ type ClusterLoadAssignmentBuilder struct { // NewClusterLoadAssignmentBuilder creates a ClusterLoadAssignmentBuilder. 
func NewClusterLoadAssignmentBuilder(clusterName string, dropPercents map[string]uint32) *ClusterLoadAssignmentBuilder { - var drops []*v2xdspb.ClusterLoadAssignment_Policy_DropOverload + drops := make([]*v2xdspb.ClusterLoadAssignment_Policy_DropOverload, 0, len(dropPercents)) for n, d := range dropPercents { drops = append(drops, &v2xdspb.ClusterLoadAssignment_Policy_DropOverload{ Category: n, @@ -88,7 +88,7 @@ type AddLocalityOptions struct { // AddLocality adds a locality to the builder. func (clab *ClusterLoadAssignmentBuilder) AddLocality(subzone string, weight uint32, priority uint32, addrsWithPort []string, opts *AddLocalityOptions) { - var lbEndPoints []*v2endpointpb.LbEndpoint + lbEndPoints := make([]*v2endpointpb.LbEndpoint, 0, len(addrsWithPort)) for i, a := range addrsWithPort { host, portStr, err := net.SplitHostPort(a) if err != nil { diff --git a/xds/internal/testutils/resource_watcher.go b/xds/internal/testutils/resource_watcher.go new file mode 100644 index 000000000000..aac9c1464774 --- /dev/null +++ b/xds/internal/testutils/resource_watcher.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package testutils + +import "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + +// TestResourceWatcher implements the xdsresource.ResourceWatcher interface, +// used to receive updates on watches registered with the xDS client, when using +// the resource-type agnostic WatchResource API. +// +// Tests can the channels provided by this tyep to get access to updates and +// errors sent by the xDS client. +type TestResourceWatcher struct { + // UpdateCh is the channel on which xDS client updates are delivered. + UpdateCh chan *xdsresource.ResourceData + // ErrorCh is the channel on which errors from the xDS client are delivered. + ErrorCh chan error + // ResourceDoesNotExistCh is the channel used to indicate calls to OnResourceDoesNotExist + ResourceDoesNotExistCh chan struct{} +} + +// OnUpdate is invoked by the xDS client to report the latest update on the resource +// being watched. +func (w *TestResourceWatcher) OnUpdate(data xdsresource.ResourceData) { + select { + case <-w.UpdateCh: + default: + } + w.UpdateCh <- &data +} + +// OnError is invoked by the xDS client to report the latest error. +func (w *TestResourceWatcher) OnError(err error) { + select { + case <-w.ErrorCh: + default: + } + w.ErrorCh <- err +} + +// OnResourceDoesNotExist is used by the xDS client to report that the resource +// being watched no longer exists. +func (w *TestResourceWatcher) OnResourceDoesNotExist() { + select { + case <-w.ResourceDoesNotExistCh: + default: + } + w.ResourceDoesNotExistCh <- struct{}{} +} + +// NewTestResourceWatcher returns a TestResourceWatcher to watch for resources +// via the xDS client. 
+func NewTestResourceWatcher() *TestResourceWatcher { + return &TestResourceWatcher{ + UpdateCh: make(chan *xdsresource.ResourceData, 1), + ErrorCh: make(chan error, 1), + ResourceDoesNotExistCh: make(chan struct{}, 1), + } +} diff --git a/xds/internal/testutils/testutils.go b/xds/internal/testutils/testutils.go new file mode 100644 index 000000000000..44891780e0c4 --- /dev/null +++ b/xds/internal/testutils/testutils.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package testutils provides utility types, for use in xds tests. +package testutils + +import ( + "fmt" + "testing" + + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" +) + +// BuildResourceName returns the resource name in the format of an xdstp:// +// resource. 
+func BuildResourceName(typeName, auth, id string, ctxParams map[string]string) string { + var typS string + switch typeName { + case xdsresource.ListenerResourceTypeName: + typS = version.V3ListenerType + case xdsresource.RouteConfigTypeName: + typS = version.V3RouteConfigType + case xdsresource.ClusterResourceTypeName: + typS = version.V3ClusterType + case xdsresource.EndpointsResourceTypeName: + typS = version.V3EndpointsType + default: + // If the name doesn't match any of the standard resources fallback + // to the type name. + typS = typeName + } + return (&xdsresource.Name{ + Scheme: "xdstp", + Authority: auth, + Type: typS, + ID: id, + ContextParams: ctxParams, + }).String() +} + +// ServerConfigForAddress returns a bootstrap.ServerConfig for the given address +// with default values of insecure channel_creds and v3 server_features. +func ServerConfigForAddress(t *testing.T, addr string) *bootstrap.ServerConfig { + t.Helper() + + jsonCfg := fmt.Sprintf(`{ + "server_uri": "%s", + "channel_creds": [{"type": "insecure"}], + "server_features": ["xds_v3"] + }`, addr) + sc, err := bootstrap.ServerConfigFromJSON([]byte(jsonCfg)) + if err != nil { + t.Fatalf("Failed to create server config from JSON %s: %v", jsonCfg, err) + } + return sc +} diff --git a/xds/internal/version/version.go b/xds/internal/version/version.go deleted file mode 100644 index dbcb76ffd1f1..000000000000 --- a/xds/internal/version/version.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package version defines constants to distinguish between supported xDS API -// versions. -package version - -// TransportAPI refers to the API version for xDS transport protocol. This -// describes the xDS gRPC endpoint and version of DiscoveryRequest/Response used -// on the wire. -type TransportAPI int - -const ( - // TransportV2 refers to the v2 xDS transport protocol. - TransportV2 TransportAPI = iota - // TransportV3 refers to the v3 xDS transport protocol. - TransportV3 -) - -// Resource URLs. We need to be able to accept either version of the resource -// regardless of the version of the transport protocol in use. -const ( - V2ListenerURL = "type.googleapis.com/envoy.api.v2.Listener" - V2RouteConfigURL = "type.googleapis.com/envoy.api.v2.RouteConfiguration" - V2ClusterURL = "type.googleapis.com/envoy.api.v2.Cluster" - V2EndpointsURL = "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment" - V2HTTPConnManagerURL = "type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager" - - V3ListenerURL = "type.googleapis.com/envoy.config.listener.v3.Listener" - V3RouteConfigURL = "type.googleapis.com/envoy.config.route.v3.RouteConfiguration" - V3ClusterURL = "type.googleapis.com/envoy.config.cluster.v3.Cluster" - V3EndpointsURL = "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment" - V3HTTPConnManagerURL = "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" - V3UpstreamTLSContextURL = "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" - V3DownstreamTLSContextURL = "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" -) diff --git a/xds/internal/xdsclient/attributes.go b/xds/internal/xdsclient/attributes.go new file mode 100644 index 000000000000..9076a76fd0dc --- /dev/null +++ 
b/xds/internal/xdsclient/attributes.go @@ -0,0 +1,36 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import "google.golang.org/grpc/resolver" + +type clientKeyType string + +const clientKey = clientKeyType("grpc.xds.internal.client.Client") + +// FromResolverState returns the Client from state, or nil if not present. +func FromResolverState(state resolver.State) XDSClient { + cs, _ := state.Attributes.Value(clientKey).(XDSClient) + return cs +} + +// SetClient sets c in state and returns the new state. +func SetClient(state resolver.State, c XDSClient) resolver.State { + state.Attributes = state.Attributes.WithValue(clientKey, c) + return state +} diff --git a/xds/internal/xdsclient/authority.go b/xds/internal/xdsclient/authority.go new file mode 100644 index 000000000000..61adf794e9b7 --- /dev/null +++ b/xds/internal/xdsclient/authority.go @@ -0,0 +1,600 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsclient + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/anypb" +) + +type watchState int + +const ( + watchStateStarted watchState = iota // Watch started, request not yet set. + watchStateRequested // Request sent for resource being watched. + watchStateReceived // Response received for resource being watched. + watchStateTimeout // Watch timer expired, no response. + watchStateCanceled // Watch cancelled. +) + +type resourceState struct { + watchers map[xdsresource.ResourceWatcher]bool // Set of watchers for this resource + cache xdsresource.ResourceData // Most recent ACKed update for this resource + md xdsresource.UpdateMetadata // Metadata for the most recent update + deletionIgnored bool // True if resource deletion was ignored for a prior update + + // Common watch state for all watchers of this resource. + wTimer *time.Timer // Expiry timer + wState watchState // State of the watch +} + +// authority wraps all state associated with a single management server. It +// contains the transport used to communicate with the management server and a +// cache of resource state for resources requested from the management server. +// +// Bootstrap configuration could contain multiple entries in the authorities map +// that share the same server config (server address and credentials to use). We +// share the same authority instance amongst these entries, and the reference +// counting is taken care of by the `clientImpl` type. 
+type authority struct { + serverCfg *bootstrap.ServerConfig // Server config for this authority + bootstrapCfg *bootstrap.Config // Full bootstrap configuration + refCount int // Reference count of watches referring to this authority + serializer *grpcsync.CallbackSerializer // Callback serializer for invoking watch callbacks + resourceTypeGetter func(string) xdsresource.Type // ResourceType registry lookup + transport *transport.Transport // Underlying xDS transport to the management server + watchExpiryTimeout time.Duration // Resource watch expiry timeout + logger *grpclog.PrefixLogger + + // A two level map containing the state of all the resources being watched. + // + // The first level map key is the ResourceType (Listener, Route etc). This + // allows us to have a single map for all resources instead of having per + // resource-type maps. + // + // The second level map key is the resource name, with the value being the + // actual state of the resource. + resourcesMu sync.Mutex + resources map[xdsresource.Type]map[string]*resourceState +} + +// authorityArgs is a convenience struct to wrap arguments required to create a +// new authority. All fields here correspond directly to appropriate fields +// stored in the authority struct. +type authorityArgs struct { + // The reason for passing server config and bootstrap config separately + // (although the former is part of the latter) is because authorities in the + // bootstrap config might contain an empty server config, and in this case, + // the top-level server config is to be used. + // + // There are two code paths from where a new authority struct might be + // created. One is when a watch is registered for a resource, and one is + // when load reporting needs to be started. We have the authority name in + // the first case, but do in the second. We only have the server config in + // the second case. 
+ serverCfg *bootstrap.ServerConfig + bootstrapCfg *bootstrap.Config + serializer *grpcsync.CallbackSerializer + resourceTypeGetter func(string) xdsresource.Type + watchExpiryTimeout time.Duration + logger *grpclog.PrefixLogger +} + +func newAuthority(args authorityArgs) (*authority, error) { + ret := &authority{ + serverCfg: args.serverCfg, + bootstrapCfg: args.bootstrapCfg, + serializer: args.serializer, + resourceTypeGetter: args.resourceTypeGetter, + watchExpiryTimeout: args.watchExpiryTimeout, + logger: args.logger, + resources: make(map[xdsresource.Type]map[string]*resourceState), + } + + tr, err := transport.New(transport.Options{ + ServerCfg: *args.serverCfg, + OnRecvHandler: ret.handleResourceUpdate, + OnErrorHandler: ret.newConnectionError, + OnSendHandler: ret.transportOnSendHandler, + Logger: args.logger, + NodeProto: args.bootstrapCfg.NodeProto, + }) + if err != nil { + return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) + } + ret.transport = tr + return ret, nil +} + +// transportOnSendHandler is called by the underlying transport when it sends a +// resource request successfully. Timers are activated for resources waiting for +// a response. +func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { + rType := a.resourceTypeGetter(u.URL) + // Resource type not found is not expected under normal circumstances, since + // the resource type url passed to the transport is determined by the authority. 
+ if rType == nil { + a.logger.Warningf("Unknown resource type url: %s.", u.URL) + return + } + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + a.startWatchTimersLocked(rType, u.ResourceNames) +} + +func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate) error { + rType := a.resourceTypeGetter(resourceUpdate.URL) + if rType == nil { + return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) + } + + opts := &xdsresource.DecodeOptions{BootstrapConfig: a.bootstrapCfg} + updates, md, err := decodeAllResources(opts, rType, resourceUpdate) + a.updateResourceStateAndScheduleCallbacks(rType, updates, md) + return err +} + +func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + resourceStates := a.resources[rType] + for name, uErr := range updates { + if state, ok := resourceStates[name]; ok { + // Cancel the expiry timer associated with the resource once a + // response is received, irrespective of whether the update is a + // good one or not. + // + // We check for watch states `started` and `requested` here to + // accommodate for a race which can happen in the following + // scenario: + // - When a watch is registered, it is possible that the ADS stream + // is not yet created. In this case, the request for the resource + // is not sent out immediately. An entry in the `resourceStates` + // map is created with a watch state of `started`. + // - Once the stream is created, it is possible that the management + // server might respond with the requested resource before we send + // out request for the same. If we don't check for `started` here, + // and move the state to `received`, we will end up starting the + // timer when the request gets sent out. 
And since the mangement + // server already sent us the resource, there is a good chance + // that it will not send it again. This would eventually lead to + // the timer firing, even though we have the resource in the + // cache. + if state.wState == watchStateStarted || state.wState == watchStateRequested { + // It is OK to ignore the return value from Stop() here because + // if the timer has already fired, it means that the timer watch + // expiry callback is blocked on the same lock that we currently + // hold. Since we move the state to `received` here, the timer + // callback will be a no-op. + if state.wTimer != nil { + state.wTimer.Stop() + } + state.wState = watchStateReceived + } + + if uErr.err != nil { + // On error, keep previous version of the resource. But update + // status and error. + state.md.ErrState = md.ErrState + state.md.Status = md.Status + for watcher := range state.watchers { + watcher := watcher + err := uErr.err + a.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + } + continue + } + + if state.deletionIgnored { + state.deletionIgnored = false + a.logger.Infof("A valid update was received for resource %q of type %q after previously ignoring a deletion", name, rType.TypeName()) + } + // Notify watchers only if this is a first time update or it is different + // from the one currently cached. + if state.cache == nil || !state.cache.Equal(uErr.resource) { + for watcher := range state.watchers { + watcher := watcher + resource := uErr.resource + a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) + } + } + // Sync cache. + a.logger.Debugf("Resource type %q with name %q added to cache", rType.TypeName(), name) + state.cache = uErr.resource + // Set status to ACK, and clear error state. The metadata might be a + // NACK metadata because some other resources in the same response + // are invalid. 
+ state.md = md + state.md.ErrState = nil + state.md.Status = xdsresource.ServiceStatusACKed + if md.ErrState != nil { + state.md.Version = md.ErrState.Version + } + } + } + + // If this resource type requires that all resources be present in every + // SotW response from the server, a response that does not include a + // previously seen resource will be interpreted as a deletion of that + // resource unless ignore_resource_deletion option was set in the server + // config. + if !rType.AllResourcesRequiredInSotW() { + return + } + for name, state := range resourceStates { + if state.cache == nil { + // If the resource state does not contain a cached update, which can + // happen when: + // - resource was newly requested but has not yet been received, or, + // - resource was removed as part of a previous update, + // we don't want to generate an error for the watchers. + // + // For the first of the above two conditions, this ADS response may + // be in reaction to an earlier request that did not yet request the + // new resource, so its absence from the response does not + // necessarily indicate that the resource does not exist. For that + // case, we rely on the request timeout instead. + // + // For the second of the above two conditions, we already generated + // an error when we received the first response which removed this + // resource. So, there is no need to generate another one. + continue + } + if _, ok := updates[name]; !ok { + // The metadata status is set to "ServiceStatusNotExist" if a + // previous update deleted this resource, in which case we do not + // want to repeatedly call the watch callbacks with a + // "resource-not-found" error. + if state.md.Status == xdsresource.ServiceStatusNotExist { + continue + } + // Per A53, resource deletions are ignored if the `ignore_resource_deletion` + // server feature is enabled through the bootstrap configuration. 
If the + // resource deletion is to be ignored, the resource is not removed from + // the cache and the corresponding OnResourceDoesNotExist() callback is + // not invoked on the watchers. + if a.serverCfg.IgnoreResourceDeletion { + if !state.deletionIgnored { + state.deletionIgnored = true + a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) + } + continue + } + // If resource exists in cache, but not in the new update, delete + // the resource from cache, and also send a resource not found error + // to indicate resource removed. Metadata for the resource is still + // maintained, as this is required by CSDS. + state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) + } + } + } +} + +type resourceDataErrTuple struct { + resource xdsresource.ResourceData + err error +} + +func decodeAllResources(opts *xdsresource.DecodeOptions, rType xdsresource.Type, update transport.ResourceUpdate) (map[string]resourceDataErrTuple, xdsresource.UpdateMetadata, error) { + timestamp := time.Now() + md := xdsresource.UpdateMetadata{ + Version: update.Version, + Timestamp: timestamp, + } + + topLevelErrors := make([]error, 0) // Tracks deserialization errors, where we don't have a resource name. + perResourceErrors := make(map[string]error) // Tracks resource validation errors, where we have a resource name. + ret := make(map[string]resourceDataErrTuple) // Return result, a map from resource name to either resource data or error. + for _, r := range update.Resources { + result, err := rType.Decode(opts, r) + + // Name field of the result is left unpopulated only when resource + // deserialization fails. 
+ name := "" + if result != nil { + name = xdsresource.ParseName(result.Name).String() + } + if err == nil { + ret[name] = resourceDataErrTuple{resource: result.Resource} + continue + } + if name == "" { + topLevelErrors = append(topLevelErrors, err) + continue + } + perResourceErrors[name] = err + // Add place holder in the map so we know this resource name was in + // the response. + ret[name] = resourceDataErrTuple{err: err} + } + + if len(topLevelErrors) == 0 && len(perResourceErrors) == 0 { + md.Status = xdsresource.ServiceStatusACKed + return ret, md, nil + } + + md.Status = xdsresource.ServiceStatusNACKed + errRet := combineErrors(rType.TypeName(), topLevelErrors, perResourceErrors) + md.ErrState = &xdsresource.UpdateErrorMetadata{ + Version: update.Version, + Err: errRet, + Timestamp: timestamp, + } + return ret, md, errRet +} + +// startWatchTimersLocked is invoked upon transport.OnSend() callback with resources +// requested on the underlying ADS stream. This satisfies the conditions to start +// watch timers per A57 [https://github.com/grpc/proposal/blob/master/A57-xds-client-failure-mode-behavior.md#handling-resources-that-do-not-exist] +// +// Caller must hold a.resourcesMu. +func (a *authority) startWatchTimersLocked(rType xdsresource.Type, resourceNames []string) { + resourceStates := a.resources[rType] + for _, resourceName := range resourceNames { + if state, ok := resourceStates[resourceName]; ok { + if state.wState != watchStateStarted { + continue + } + state.wTimer = time.AfterFunc(a.watchExpiryTimeout, func() { + a.handleWatchTimerExpiry(rType, resourceName, state) + }) + state.wState = watchStateRequested + } + } +} + +// stopWatchTimersLocked is invoked upon connection errors to stops watch timers +// for resources that have been requested, but not yet responded to by the management +// server. +// +// Caller must hold a.resourcesMu. 
+func (a *authority) stopWatchTimersLocked() { + for _, rType := range a.resources { + for resourceName, state := range rType { + if state.wState != watchStateRequested { + continue + } + if !state.wTimer.Stop() { + // If the timer has already fired, it means that the timer watch expiry + // callback is blocked on the same lock that we currently hold. Don't change + // the watch state and instead let the watch expiry callback handle it. + a.logger.Warningf("Watch timer for resource %v already fired. Ignoring here.", resourceName) + continue + } + state.wTimer = nil + state.wState = watchStateStarted + } + } +} + +// newConnectionError is called by the underlying transport when it receives a +// connection error. The error will be forwarded to all the resource watchers. +func (a *authority) newConnectionError(err error) { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + a.stopWatchTimersLocked() + + // We do not consider it an error if the ADS stream was closed after having received + // a response on the stream. This is because there are legitimate reasons why the server + // may need to close the stream during normal operations, such as needing to rebalance + // load or the underlying connection hitting its max connection age limit. + // See gRFC A57 for more details. + if xdsresource.ErrType(err) == xdsresource.ErrTypeStreamFailedAfterRecv { + a.logger.Warningf("Watchers not notified since ADS stream failed after having received at least one response: %v", err) + return + } + + for _, rType := range a.resources { + for _, state := range rType { + // Propagate the connection error from the transport layer to all watchers. + for watcher := range state.watchers { + watcher := watcher + a.serializer.Schedule(func(context.Context) { + watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) + }) + } + } + } +} + +// Increments the reference count. Caller must hold parent's authorityMu. 
+func (a *authority) refLocked() { + a.refCount++ +} + +// Decrements the reference count. Caller must hold parent's authorityMu. +func (a *authority) unrefLocked() int { + a.refCount-- + return a.refCount +} + +func (a *authority) close() { + a.transport.Close() +} + +func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { + a.logger.Debugf("New watch for type %q, resource name %q", rType.TypeName(), resourceName) + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // Lookup the ResourceType specific resources from the top-level map. If + // there is no entry for this ResourceType, create one. + resources := a.resources[rType] + if resources == nil { + resources = make(map[string]*resourceState) + a.resources[rType] = resources + } + + // Lookup the resourceState for the particular resource that the watch is + // being registered for. If this is the first watch for this resource, + // instruct the transport layer to send a DiscoveryRequest for the same. + state := resources[resourceName] + if state == nil { + a.logger.Debugf("First watch for type %q, resource name %q", rType.TypeName(), resourceName) + state = &resourceState{ + watchers: make(map[xdsresource.ResourceWatcher]bool), + md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, + wState: watchStateStarted, + } + resources[resourceName] = state + a.sendDiscoveryRequestLocked(rType, resources) + } + // Always add the new watcher to the set of watchers. + state.watchers[watcher] = true + + // If we have a cached copy of the resource, notify the new watcher. 
+ if state.cache != nil { + a.logger.Debugf("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) + resource := state.cache + a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) + } + + return func() { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + // We already have a reference to the resourceState for this particular + // resource. Avoid indexing into the two-level map to figure this out. + + // Delete this particular watcher from the list of watchers, so that its + // callback will not be invoked in the future. + state.wState = watchStateCanceled + delete(state.watchers, watcher) + if len(state.watchers) > 0 { + return + } + + // There are no more watchers for this resource, delete the state + // associated with it, and instruct the transport to send a request + // which does not include this resource name. + a.logger.Debugf("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) + delete(resources, resourceName) + a.sendDiscoveryRequestLocked(rType, resources) + } +} + +func (a *authority) handleWatchTimerExpiry(rType xdsresource.Type, resourceName string, state *resourceState) { + a.logger.Warningf("Watch for resource %q of type %s timed out", resourceName, rType.TypeName()) + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + switch state.wState { + case watchStateRequested: + // This is the only state where we need to handle the timer expiry by + // invoking appropriate watch callbacks. This is handled outside the switch. + case watchStateCanceled: + return + default: + a.logger.Warningf("Unexpected watch state %q for resource %q.", state.wState, resourceName) + return + } + + state.wState = watchStateTimeout + // With the watch timer firing, it is safe to assume that the resource does + // not exist on the management server. 
+ state.cache = nil + state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} + for watcher := range state.watchers { + watcher := watcher + a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) + } +} + +// sendDiscoveryRequestLocked sends a discovery request for the specified +// resource type and resource names. Even though this method does not directly +// access the resource cache, it is important that `resourcesMu` be beld when +// calling this method to ensure that a consistent snapshot of resource names is +// being requested. +func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) { + resourcesToRequest := make([]string, len(resources)) + i := 0 + for name := range resources { + resourcesToRequest[i] = name + i++ + } + a.transport.SendRequest(rType.TypeURL(), resourcesToRequest) +} + +func (a *authority) reportLoad() (*load.Store, func()) { + return a.transport.ReportLoad() +} + +func (a *authority) dumpResources() map[string]map[string]xdsresource.UpdateWithMD { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + + dump := make(map[string]map[string]xdsresource.UpdateWithMD) + for rType, resourceStates := range a.resources { + states := make(map[string]xdsresource.UpdateWithMD) + for name, state := range resourceStates { + var raw *anypb.Any + if state.cache != nil { + raw = state.cache.Raw() + } + states[name] = xdsresource.UpdateWithMD{ + MD: state.md, + Raw: raw, + } + } + dump[rType.TypeURL()] = states + } + return dump +} + +func combineErrors(rType string, topLevelErrors []error, perResourceErrors map[string]error) error { + var errStrB strings.Builder + errStrB.WriteString(fmt.Sprintf("error parsing %q response: ", rType)) + if len(topLevelErrors) > 0 { + errStrB.WriteString("top level errors: ") + for i, err := range topLevelErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + errStrB.WriteString(err.Error()) + } + } + if 
len(perResourceErrors) > 0 { + var i int + for name, err := range perResourceErrors { + if i != 0 { + errStrB.WriteString(";\n") + } + i++ + errStrB.WriteString(fmt.Sprintf("resource %q: %v", name, err.Error())) + } + } + return errors.New(errStrB.String()) +} diff --git a/xds/internal/xdsclient/authority_test.go b/xds/internal/xdsclient/authority_test.go new file mode 100644 index 000000000000..09a81759a1f3 --- /dev/null +++ b/xds/internal/xdsclient/authority_test.go @@ -0,0 +1,297 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/uuid" + "google.golang.org/grpc/internal/grpcsync" + util "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal" + + "google.golang.org/grpc/xds/internal/testutils" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. 
+) + +var emptyServerOpts = e2e.ManagementServerOptions{} + +var ( + // Listener resource type implementation retrieved from the resource type map + // in the internal package, which is initialized when the individual resource + // types are created. + listenerResourceType = internal.ResourceTypeMapForTesting[version.V3ListenerURL].(xdsresource.Type) + rtRegistry = newResourceTypeRegistry() +) + +func init() { + // Simulating maybeRegister for listenerResourceType. The getter to this registry + // is passed to the authority for accessing the resource type. + rtRegistry.types[listenerResourceType.TypeURL()] = listenerResourceType +} + +func setupTest(ctx context.Context, t *testing.T, opts e2e.ManagementServerOptions, watchExpiryTimeout time.Duration) (*authority, *e2e.ManagementServer, string) { + t.Helper() + nodeID := uuid.New().String() + ms, err := e2e.StartManagementServer(opts) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %q", err) + } + + a, err := newAuthority(authorityArgs{ + serverCfg: xdstestutils.ServerConfigForAddress(t, ms.Address), + bootstrapCfg: &bootstrap.Config{ + NodeProto: &v3corepb.Node{Id: nodeID}, + }, + serializer: grpcsync.NewCallbackSerializer(ctx), + resourceTypeGetter: rtRegistry.get, + watchExpiryTimeout: watchExpiryTimeout, + logger: nil, + }) + if err != nil { + t.Fatalf("Failed to create authority: %q", err) + } + return a, ms, nodeID +} + +// This tests verifies watch and timer state for the scenario where a watch for +// an LDS resource is registered and the management server sends an update the +// same resource. 
+func (s) TestTimerAndWatchStateOnSendCallback(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + a, ms, nodeID := setupTest(ctx, t, emptyServerOpts, defaultTestTimeout) + defer ms.Stop() + defer a.close() + + rn := "xdsclient-test-lds-resource" + w := testutils.NewTestResourceWatcher() + cancelResource := a.watchResource(listenerResourceType, rn, w) + defer cancelResource() + + // Looping until the underlying transport has successfully sent the request to + // the server, which would call the onSend callback and transition the watchState + // to `watchStateRequested`. + for ctx.Err() == nil { + if err := compareWatchState(a, rn, watchStateRequested); err == nil { + break + } + } + if ctx.Err() != nil { + t.Fatalf("Test timed out before state transiton to %q was verified.", watchStateRequested) + } + + // Updating mgmt server with the same lds resource. Blocking on watcher's update + // ch to verify the watch state transition to `watchStateReceived`. + if err := updateResourceInServer(ctx, ms, rn, nodeID); err != nil { + t.Fatalf("Failed to update server with resource: %q; err: %q", rn, err) + } + for { + select { + case <-ctx.Done(): + t.Fatal("Test timed out before watcher received an update from server.") + case <-w.ErrorCh: + case <-w.UpdateCh: + // This means the OnUpdate callback was invoked and the watcher was notified. + if err := compareWatchState(a, rn, watchStateReceived); err != nil { + t.Fatal(err) + } + return + } + } +} + +// This tests the resource's watch state transition when the ADS stream is closed +// by the management server. After the test calls `watchResource` api to register +// a watch for a resource, it stops the management server, and verifies the resource's +// watch state transitions to `watchStateStarted` and timer ready to be restarted. 
+func (s) TestTimerAndWatchStateOnErrorCallback(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + a, ms, _ := setupTest(ctx, t, emptyServerOpts, defaultTestTimeout) + defer a.close() + + rn := "xdsclient-test-lds-resource" + w := testutils.NewTestResourceWatcher() + cancelResource := a.watchResource(listenerResourceType, rn, w) + defer cancelResource() + + // Stopping the server and blocking on watcher's err channel to be notified. + // This means the onErr callback should be invoked which transitions the watch + // state to `watchStateStarted`. + ms.Stop() + + select { + case <-ctx.Done(): + t.Fatal("Test timed out before verifying error propagation.") + case err := <-w.ErrorCh: + if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { + t.Fatal("Connection error not propagated to watchers.") + } + } + + if err := compareWatchState(a, rn, watchStateStarted); err != nil { + t.Fatal(err) + } +} + +// This tests the case where the ADS stream breaks after successfully receiving +// a message on the stream. The test performs the following: +// - configures the management server with the ability to dropRequests based on +// a boolean flag. +// - update the mgmt server with resourceA. +// - registers a watch for resourceA and verifies that the watcher's update +// callback is invoked. +// - registers a watch for resourceB and verifies that the watcher's update +// callback is not invoked. This is because the management server does not +// contain resourceB. +// - force mgmt server to drop requests. Verify that watcher for resourceB gets +// connection error. +// - resume mgmt server to accept requests. +// - update the mgmt server with resourceB and verifies that the watcher's +// update callback is invoked. 
+func (s) TestWatchResourceTimerCanRestartOnIgnoredADSRecvError(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + // Create a restartable listener which can close existing connections. + l, err := util.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + lis := util.NewRestartableListener(l) + defer lis.Close() + streamRestarted := grpcsync.NewEvent() + serverOpt := e2e.ManagementServerOptions{ + Listener: lis, + OnStreamClosed: func(int64, *v3corepb.Node) { + streamRestarted.Fire() + }, + } + + a, ms, nodeID := setupTest(ctx, t, serverOpt, defaultTestTimeout) + defer ms.Stop() + defer a.close() + + nameA := "xdsclient-test-lds-resourceA" + watcherA := testutils.NewTestResourceWatcher() + cancelA := a.watchResource(listenerResourceType, nameA, watcherA) + + if err := updateResourceInServer(ctx, ms, nameA, nodeID); err != nil { + t.Fatalf("Failed to update server with resource: %q; err: %q", nameA, err) + } + + // Blocking on resource A watcher's update Channel to verify that there is + // more than one msg(s) received the ADS stream. + select { + case <-ctx.Done(): + t.Fatal("Test timed out before watcher received the update.") + case err := <-watcherA.ErrorCh: + t.Fatalf("Watch got an unexpected error update: %q; want: valid update.", err) + case <-watcherA.UpdateCh: + } + + cancelA() + lis.Stop() + + nameB := "xdsclient-test-lds-resourceB" + watcherB := testutils.NewTestResourceWatcher() + cancelB := a.watchResource(listenerResourceType, nameB, watcherB) + defer cancelB() + + // Blocking on resource B watcher's error channel. This error should be due to + // connectivity issue when reconnecting because the mgmt server was already been + // stopped. Also verifying that OnResourceDoesNotExist() method was not invoked + // on the watcher. 
+ select { + case <-ctx.Done(): + t.Fatal("Test timed out before mgmt server got the request.") + case u := <-watcherB.UpdateCh: + t.Fatalf("Watch got an unexpected resource update: %v.", u) + case <-watcherB.ResourceDoesNotExistCh: + t.Fatalf("Illegal invocation of OnResourceDoesNotExist() method on the watcher.") + case gotErr := <-watcherB.ErrorCh: + wantErr := xdsresource.ErrorTypeConnection + if xdsresource.ErrType(gotErr) != wantErr { + t.Fatalf("Watch got an unexpected error:%q. Want: %q.", gotErr, wantErr) + } + } + + // Updating server with resource B and also re-enabling requests on the server. + if err := updateResourceInServer(ctx, ms, nameB, nodeID); err != nil { + t.Fatalf("Failed to update server with resource: %q; err: %q", nameB, err) + } + lis.Restart() + + for { + select { + case <-ctx.Done(): + t.Fatal("Test timed out before watcher received the update.") + case <-watcherB.UpdateCh: + return + } + } +} + +func compareWatchState(a *authority, rn string, wantState watchState) error { + a.resourcesMu.Lock() + defer a.resourcesMu.Unlock() + gotState := a.resources[listenerResourceType][rn].wState + if gotState != wantState { + return fmt.Errorf("Got %v. Want: %v", gotState, wantState) + } + + wTimer := a.resources[listenerResourceType][rn].wTimer + switch gotState { + case watchStateRequested: + if wTimer == nil { + return fmt.Errorf("got nil timer, want active timer") + } + case watchStateStarted: + if wTimer != nil { + return fmt.Errorf("got active timer, want nil timer") + } + default: + if wTimer.Stop() { + // This means that the timer was running but could be successfully stopped. 
+ return fmt.Errorf("got active timer, want stopped timer") + } + } + return nil +} + +func updateResourceInServer(ctx context.Context, ms *e2e.ManagementServer, rn string, nID string) error { + l := e2e.DefaultClientListener(rn, "new-rds-resource") + resources := e2e.UpdateOptions{ + NodeID: nID, + Listeners: []*v3listenerpb.Listener{l}, + SkipValidation: true, + } + return ms.Update(ctx, resources) +} diff --git a/xds/internal/xdsclient/bootstrap/bootstrap.go b/xds/internal/xdsclient/bootstrap/bootstrap.go new file mode 100644 index 000000000000..aec2fa51f30f --- /dev/null +++ b/xds/internal/xdsclient/bootstrap/bootstrap.go @@ -0,0 +1,564 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package bootstrap provides the functionality to initialize certain aspects +// of an xDS client by reading a bootstrap file. 
+package bootstrap + +import ( + "bytes" + "encoding/json" + "fmt" + "net/url" + "os" + "strings" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + "github.com/golang/protobuf/jsonpb" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/google" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/credentials/tls/certprovider" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/bootstrap" +) + +const ( + // The "server_features" field in the bootstrap file contains a list of + // features supported by the server: + // - A value of "xds_v3" indicates that the server supports the v3 version of + // the xDS transport protocol. + // - A value of "ignore_resource_deletion" indicates that the client should + // ignore deletion of Listener and Cluster resources in updates from the + // server. + serverFeaturesV3 = "xds_v3" + serverFeaturesIgnoreResourceDeletion = "ignore_resource_deletion" + + gRPCUserAgentName = "gRPC Go" + clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" + clientFeatureResourceWrapper = "xds.config.resource-in-sotw" +) + +func init() { + bootstrap.RegisterCredentials(&insecureCredsBuilder{}) + bootstrap.RegisterCredentials(&googleDefaultCredsBuilder{}) +} + +// For overriding in unit tests. +var bootstrapFileReadFunc = os.ReadFile + +// insecureCredsBuilder implements the `Credentials` interface defined in +// package `xds/bootstrap` and encapsulates an insecure credential. 
+type insecureCredsBuilder struct{}
+
+func (i *insecureCredsBuilder) Build(json.RawMessage) (credentials.Bundle, error) {
+	return insecure.NewBundle(), nil
+}
+
+func (i *insecureCredsBuilder) Name() string {
+	return "insecure"
+}
+
+// googleDefaultCredsBuilder implements the `Credentials` interface defined in
+// package `xds/bootstrap` and encapsulates a Google Default credential.
+type googleDefaultCredsBuilder struct{}
+
+func (d *googleDefaultCredsBuilder) Build(json.RawMessage) (credentials.Bundle, error) {
+	return google.NewDefaultCredentials(), nil
+}
+
+func (d *googleDefaultCredsBuilder) Name() string {
+	return "google_default"
+}
+
+// ChannelCreds contains the credentials to be used while communicating with an
+// xDS server. It is also used to dedup servers with the same server URI.
+type ChannelCreds struct {
+	// Type contains a unique name identifying the credentials type. The only
+	// supported types currently are "google_default" and "insecure".
+	Type string
+	// Config contains the JSON configuration associated with the credentials.
+	Config json.RawMessage
+}
+
+// Equal reports whether cc and other are considered equal.
+func (cc ChannelCreds) Equal(other ChannelCreds) bool {
+	return cc.Type == other.Type && bytes.Equal(cc.Config, other.Config)
+}
+
+// String returns a string representation of the credentials. It contains the
+// type and the config (if non-nil) separated by a "-".
+func (cc ChannelCreds) String() string {
+	if cc.Config == nil {
+		return cc.Type
+	}
+
+	// We do not expect the Marshal call to fail since we wrote to cc.Config
+	// after a successful unmarshaling from JSON configuration. Therefore,
+	// it is safe to ignore the error here.
+	b, _ := json.Marshal(cc.Config)
+	return cc.Type + "-" + string(b)
+}
+
+// ServerConfig contains the configuration to connect to a server, including
+// URI, creds, and transport API version (e.g. v2 or v3).
+//
+// It contains unexported fields that are initialized when unmarshaled from JSON
+// using either the UnmarshalJSON() method or the ServerConfigFromJSON()
+// function. Hence users are strongly encouraged not to use a literal struct
+// initialization to create an instance of this type, but instead unmarshal from
+// JSON using one of the two available options.
+type ServerConfig struct {
+	// ServerURI is the management server to connect to.
+	//
+	// The bootstrap file contains an ordered list of xDS servers to contact for
+	// this authority. The first one is picked.
+	ServerURI string
+	// Creds contains the credentials to be used while communicating with this
+	// xDS server. It is also used to dedup servers with the same server URI.
+	Creds ChannelCreds
+	// ServerFeatures contains a list of features supported by this xDS server.
+	// It is also used to dedup servers with the same server URI and creds.
+	ServerFeatures []string
+
+	// As part of unmarshaling the JSON config into this struct, we ensure that
+	// the credentials config is valid by building an instance of the specified
+	// credentials and store it here as a grpc.DialOption for easy access when
+	// dialing this xDS server.
+	credsDialOption grpc.DialOption
+
+	// IgnoreResourceDeletion controls the behavior of the xDS client when the
+	// server deletes a previously sent Listener or Cluster resource. If set, the
+	// xDS client will not invoke the watchers' OnResourceDoesNotExist() method
+	// when a resource is deleted, nor will it remove the existing resource value
+	// from its cache.
+	IgnoreResourceDeletion bool
+}
+
+// CredsDialOption returns the configured credentials as a grpc dial option.
+func (sc *ServerConfig) CredsDialOption() grpc.DialOption {
+	return sc.credsDialOption
+}
+
+// String returns the string representation of the ServerConfig.
+// +// This string representation will be used as map keys in federation +// (`map[ServerConfig]authority`), so that the xDS ClientConn and stream will be +// shared by authorities with different names but the same server config. +// +// It covers (almost) all the fields so the string can represent the config +// content. It doesn't cover NodeProto because NodeProto isn't used by +// federation. +func (sc *ServerConfig) String() string { + features := strings.Join(sc.ServerFeatures, "-") + return strings.Join([]string{sc.ServerURI, sc.Creds.String(), features}, "-") +} + +// MarshalJSON marshals the ServerConfig to json. +func (sc ServerConfig) MarshalJSON() ([]byte, error) { + server := xdsServer{ + ServerURI: sc.ServerURI, + ChannelCreds: []channelCreds{{Type: sc.Creds.Type, Config: sc.Creds.Config}}, + ServerFeatures: sc.ServerFeatures, + } + server.ServerFeatures = []string{serverFeaturesV3} + if sc.IgnoreResourceDeletion { + server.ServerFeatures = append(server.ServerFeatures, serverFeaturesIgnoreResourceDeletion) + } + return json.Marshal(server) +} + +// UnmarshalJSON takes the json data (a server) and unmarshals it to the struct. +func (sc *ServerConfig) UnmarshalJSON(data []byte) error { + var server xdsServer + if err := json.Unmarshal(data, &server); err != nil { + return fmt.Errorf("xds: json.Unmarshal(data) for field ServerConfig failed during bootstrap: %v", err) + } + + sc.ServerURI = server.ServerURI + sc.ServerFeatures = server.ServerFeatures + for _, f := range server.ServerFeatures { + if f == serverFeaturesIgnoreResourceDeletion { + sc.IgnoreResourceDeletion = true + } + } + for _, cc := range server.ChannelCreds { + // We stop at the first credential type that we support. 
+ c := bootstrap.GetCredentials(cc.Type) + if c == nil { + continue + } + bundle, err := c.Build(cc.Config) + if err != nil { + return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err) + } + sc.Creds = ChannelCreds(cc) + sc.credsDialOption = grpc.WithCredentialsBundle(bundle) + break + } + return nil +} + +// ServerConfigFromJSON creates a new ServerConfig from the given JSON +// configuration. This is the preferred way of creating a ServerConfig when +// hand-crafting the JSON configuration. +func ServerConfigFromJSON(data []byte) (*ServerConfig, error) { + sc := new(ServerConfig) + if err := sc.UnmarshalJSON(data); err != nil { + return nil, err + } + return sc, nil +} + +// Equal reports whether sc and other are considered equal. +func (sc *ServerConfig) Equal(other *ServerConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + case sc.ServerURI != other.ServerURI: + return false + case !sc.Creds.Equal(other.Creds): + return false + case !equalStringSlice(sc.ServerFeatures, other.ServerFeatures): + return false + } + return true +} + +func equalStringSlice(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +// unmarshalJSONServerConfigSlice unmarshals JSON to a slice. +func unmarshalJSONServerConfigSlice(data []byte) ([]*ServerConfig, error) { + var servers []*ServerConfig + if err := json.Unmarshal(data, &servers); err != nil { + return nil, fmt.Errorf("failed to unmarshal JSON to []*ServerConfig: %v", err) + } + if len(servers) < 1 { + return nil, fmt.Errorf("no management server found in JSON") + } + return servers, nil +} + +// Authority contains configuration for an Authority for an xDS control plane +// server. See the Authorities field in the Config struct for how it's used. 
+type Authority struct { + // ClientListenerResourceNameTemplate is template for the name of the + // Listener resource to subscribe to for a gRPC client channel. Used only + // when the channel is created using an "xds:" URI with this authority name. + // + // The token "%s", if present in this string, will be replaced + // with %-encoded service authority (i.e., the path part of the target + // URI used to create the gRPC channel). + // + // Must start with "xdstp:///". If it does not, + // that is considered a bootstrap file parsing error. + // + // If not present in the bootstrap file, defaults to + // "xdstp:///envoy.config.listener.v3.Listener/%s". + ClientListenerResourceNameTemplate string + // XDSServer contains the management server and config to connect to for + // this authority. + XDSServer *ServerConfig +} + +// UnmarshalJSON implement json unmarshaller. +func (a *Authority) UnmarshalJSON(data []byte) error { + var jsonData map[string]json.RawMessage + if err := json.Unmarshal(data, &jsonData); err != nil { + return fmt.Errorf("xds: failed to parse authority: %v", err) + } + + for k, v := range jsonData { + switch k { + case "xds_servers": + servers, err := unmarshalJSONServerConfigSlice(v) + if err != nil { + return fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err) + } + a.XDSServer = servers[0] + case "client_listener_resource_name_template": + if err := json.Unmarshal(v, &a.ClientListenerResourceNameTemplate); err != nil { + return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + } + } + return nil +} + +// Config provides the xDS client with several key bits of information that it +// requires in its interaction with the management server. The Config is +// initialized from the bootstrap file. +type Config struct { + // XDSServer is the management server to connect to. 
+ // + // The bootstrap file contains a list of servers (with name+creds), but we + // pick the first one. + XDSServer *ServerConfig + // CertProviderConfigs contains a mapping from certificate provider plugin + // instance names to parsed buildable configs. + CertProviderConfigs map[string]*certprovider.BuildableConfig + // ServerListenerResourceNameTemplate is a template for the name of the + // Listener resource to subscribe to for a gRPC server. + // + // If starts with "xdstp:", will be interpreted as a new-style name, + // in which case the authority of the URI will be used to select the + // relevant configuration in the "authorities" map. + // + // The token "%s", if present in this string, will be replaced with the IP + // and port on which the server is listening. (e.g., "0.0.0.0:8080", + // "[::]:8080"). For example, a value of "example/resource/%s" could become + // "example/resource/0.0.0.0:8080". If the template starts with "xdstp:", + // the replaced string will be %-encoded. + // + // There is no default; if unset, xDS-based server creation fails. + ServerListenerResourceNameTemplate string + // A template for the name of the Listener resource to subscribe to + // for a gRPC client channel. Used only when the channel is created + // with an "xds:" URI with no authority. + // + // If starts with "xdstp:", will be interpreted as a new-style name, + // in which case the authority of the URI will be used to select the + // relevant configuration in the "authorities" map. + // + // The token "%s", if present in this string, will be replaced with + // the service authority (i.e., the path part of the target URI + // used to create the gRPC channel). If the template starts with + // "xdstp:", the replaced string will be %-encoded. + // + // Defaults to "%s". + ClientDefaultListenerResourceNameTemplate string + // Authorities is a map of authority name to corresponding configuration. 
+ // + // This is used in the following cases: + // - A gRPC client channel is created using an "xds:" URI that includes + // an authority. + // - A gRPC client channel is created using an "xds:" URI with no + // authority, but the "client_default_listener_resource_name_template" + // field above turns it into an "xdstp:" URI. + // - A gRPC server is created and the + // "server_listener_resource_name_template" field is an "xdstp:" URI. + // + // In any of those cases, it is an error if the specified authority is + // not present in this map. + Authorities map[string]*Authority + // NodeProto contains the Node proto to be used in xDS requests. This will be + // of type *v3corepb.Node. + NodeProto *v3corepb.Node +} + +type channelCreds struct { + Type string `json:"type"` + Config json.RawMessage `json:"config,omitempty"` +} + +type xdsServer struct { + ServerURI string `json:"server_uri"` + ChannelCreds []channelCreds `json:"channel_creds"` + ServerFeatures []string `json:"server_features"` +} + +func bootstrapConfigFromEnvVariable() ([]byte, error) { + fName := envconfig.XDSBootstrapFileName + fContent := envconfig.XDSBootstrapFileContent + + // Bootstrap file name has higher priority than bootstrap content. + if fName != "" { + // If file name is set + // - If file not found (or other errors), fail + // - Otherwise, use the content. + // + // Note that even if the content is invalid, we don't failover to the + // file content env variable. + logger.Debugf("Using bootstrap file with name %q", fName) + return bootstrapFileReadFunc(fName) + } + + if fContent != "" { + return []byte(fContent), nil + } + + return nil, fmt.Errorf("none of the bootstrap environment variables (%q or %q) defined", + envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv) +} + +// NewConfig returns a new instance of Config initialized by reading the +// bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents specified +// at ${GRPC_XDS_BOOTSTRAP_CONFIG}. 
If both env vars are set, the former is +// preferred. +// +// We support a credential registration mechanism and only credentials +// registered through that mechanism will be accepted here. See package +// `xds/bootstrap` for details. +// +// This function tries to process as much of the bootstrap file as possible (in +// the presence of the errors) and may return a Config object with certain +// fields left unspecified, in which case the caller should use some sane +// defaults. +func NewConfig() (*Config, error) { + // Examples of the bootstrap json can be found in the generator tests + // https://github.com/GoogleCloudPlatform/traffic-director-grpc-bootstrap/blob/master/main_test.go. + data, err := bootstrapConfigFromEnvVariable() + if err != nil { + return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) + } + return newConfigFromContents(data) +} + +// NewConfigFromContentsForTesting returns a new Config using the specified +// bootstrap file contents instead of reading the environment variable. +// +// This is only suitable for testing purposes. 
+func NewConfigFromContentsForTesting(data []byte) (*Config, error) { + return newConfigFromContents(data) +} + +func newConfigFromContents(data []byte) (*Config, error) { + config := &Config{} + + var jsonData map[string]json.RawMessage + if err := json.Unmarshal(data, &jsonData); err != nil { + return nil, fmt.Errorf("xds: failed to parse bootstrap config: %v", err) + } + + var node *v3corepb.Node + m := jsonpb.Unmarshaler{AllowUnknownFields: true} + for k, v := range jsonData { + switch k { + case "node": + node = &v3corepb.Node{} + if err := m.Unmarshal(bytes.NewReader(v), node); err != nil { + return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + case "xds_servers": + servers, err := unmarshalJSONServerConfigSlice(v) + if err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err) + } + config.XDSServer = servers[0] + case "certificate_providers": + var providerInstances map[string]json.RawMessage + if err := json.Unmarshal(v, &providerInstances); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + configs := make(map[string]*certprovider.BuildableConfig) + getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder) + for instance, data := range providerInstances { + var nameAndConfig struct { + PluginName string `json:"plugin_name"` + Config json.RawMessage `json:"config"` + } + if err := json.Unmarshal(data, &nameAndConfig); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), instance, err) + } + + name := nameAndConfig.PluginName + parser := getBuilder(nameAndConfig.PluginName) + if parser == nil { + // We ignore plugins that we do not know about. 
+ continue + } + bc, err := parser.ParseConfig(nameAndConfig.Config) + if err != nil { + return nil, fmt.Errorf("xds: config parsing for plugin %q failed: %v", name, err) + } + configs[instance] = bc + } + config.CertProviderConfigs = configs + case "server_listener_resource_name_template": + if err := json.Unmarshal(v, &config.ServerListenerResourceNameTemplate); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + case "client_default_listener_resource_name_template": + if !envconfig.XDSFederation { + logger.Warningf("Bootstrap field %v is not support when Federation is disabled", k) + continue + } + if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + case "authorities": + if !envconfig.XDSFederation { + logger.Warningf("Bootstrap field %v is not support when Federation is disabled", k) + continue + } + if err := json.Unmarshal(v, &config.Authorities); err != nil { + return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) + } + default: + logger.Warningf("Bootstrap content has unknown field: %s", k) + } + // Do not fail the xDS bootstrap when an unknown field is seen. This can + // happen when an older version client reads a newer version bootstrap + // file with new fields. + } + + if config.ClientDefaultListenerResourceNameTemplate == "" { + // Default value of the default client listener name template is "%s". 
+		config.ClientDefaultListenerResourceNameTemplate = "%s"
+	}
+	if config.XDSServer == nil {
+		return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers", jsonData["xds_servers"])
+	}
+	if config.XDSServer.ServerURI == "" {
+		return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"])
+	}
+	if config.XDSServer.CredsDialOption() == nil {
+		return nil, fmt.Errorf("xds: required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"])
+	}
+	// Post-process the authorities' client listener resource template field:
+	// - if set, it must start with "xdstp:///"
+	// - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s"
+	for name, authority := range config.Authorities {
+		prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name))
+		if authority.ClientListenerResourceNameTemplate == "" {
+			authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s"
+			continue
+		}
+		if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) {
+			return nil, fmt.Errorf("xds: field ClientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix)
+		}
+	}
+
+	// Performing post-processing on the node information. Some additional fields
+	// which are not expected to be set in the bootstrap file are populated here.
+ if node == nil { + node = &v3corepb.Node{} + } + node.UserAgentName = gRPCUserAgentName + node.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} + node.ClientFeatures = append(node.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper) + config.NodeProto = node + + logger.Debugf("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) + return config, nil +} diff --git a/xds/internal/client/bootstrap/bootstrap_test.go b/xds/internal/xdsclient/bootstrap/bootstrap_test.go similarity index 52% rename from xds/internal/client/bootstrap/bootstrap_test.go rename to xds/internal/xdsclient/bootstrap/bootstrap_test.go index 501d62102d21..026460458b08 100644 --- a/xds/internal/client/bootstrap/bootstrap_test.go +++ b/xds/internal/xdsclient/bootstrap/bootstrap_test.go @@ -25,23 +25,52 @@ import ( "os" "testing" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" "github.com/golang/protobuf/proto" - structpb "github.com/golang/protobuf/ptypes/struct" "github.com/google/go-cmp/cmp" - + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc" - "google.golang.org/grpc/credentials/google" - "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/xds/env" - "google.golang.org/grpc/xds/internal/version" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/xds/bootstrap" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + structpb "github.com/golang/protobuf/ptypes/struct" ) var ( - v2BootstrapFileMap = map[string]string{ + v3BootstrapFileMap = map[string]string{ + "serverFeaturesIncludesXDSV3": ` + { + "node": { + "id": "ENVOY_NODE_ID", + 
"metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ + { "type": "google_default" } + ], + "server_features" : ["xds_v3"] + }] + }`, + "serverFeaturesExcludesXDSV3": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ + { "type": "google_default" } + ] + }] + }`, "emptyNodeProto": ` { "xds_servers" : [{ @@ -112,7 +141,8 @@ var ( "channel_creds": [ { "type": "not-google-default" }, { "type": "google_default" } - ] + ], + "server_features": ["xds_v3"] }] }`, "goodBootstrap": ` @@ -127,7 +157,8 @@ var ( "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [ { "type": "google_default" } - ] + ], + "server_features": ["xds_v3"] }] }`, "multipleXDSServers": ` @@ -141,7 +172,8 @@ var ( "xds_servers" : [ { "server_uri": "trafficdirector.googleapis.com:443", - "channel_creds": [{ "type": "google_default" }] + "channel_creds": [{ "type": "google_default" }], + "server_features": ["xds_v3"] }, { "server_uri": "backup.never.use.com:1234", @@ -149,25 +181,7 @@ var ( } ] }`, - } - v3BootstrapFileMap = map[string]string{ - "serverDoesNotSupportsV3": ` - { - "node": { - "id": "ENVOY_NODE_ID", - "metadata": { - "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" - } - }, - "xds_servers" : [{ - "server_uri": "trafficdirector.googleapis.com:443", - "channel_creds": [ - { "type": "google_default" } - ], - "server_features" : ["foo", "bar"] - }] - }`, - "serverSupportsV3": ` + "serverSupportsIgnoreResourceDeletion": ` { "node": { "id": "ENVOY_NODE_ID", @@ -180,7 +194,7 @@ var ( "channel_creds": [ { "type": "google_default" } ], - "server_features" : ["foo", "bar", "xds_v3"] + "server_features" : ["ignore_resource_deletion", "xds_v3"] }] }`, } @@ -191,75 +205,58 @@ var ( }, }, } - v2NodeProto 
= &v2corepb.Node{ - Id: "ENVOY_NODE_ID", - Metadata: metadata, - BuildVersion: gRPCVersion, - UserAgentName: gRPCUserAgentName, - UserAgentVersionType: &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, - ClientFeatures: []string{clientFeatureNoOverprovisioning}, - } v3NodeProto = &v3corepb.Node{ Id: "ENVOY_NODE_ID", Metadata: metadata, UserAgentName: gRPCUserAgentName, UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, - ClientFeatures: []string{clientFeatureNoOverprovisioning}, - } - nilCredsConfigV2 = &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: v2NodeProto, + ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, } - nonNilCredsConfigV2 = &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - NodeProto: v2NodeProto, + nilCredsConfigNoServerFeatures = &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "insecure"}, + }, + NodeProto: v3NodeProto, + ClientDefaultListenerResourceNameTemplate: "%s", } nonNilCredsConfigV3 = &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - TransportAPI: version.TransportV3, - NodeProto: v3NodeProto, - } -) - -func (c *Config) compare(want *Config) error { - if c.BalancerName != want.BalancerName { - return fmt.Errorf("config.BalancerName is %s, want %s", c.BalancerName, want.BalancerName) - } - // Since Creds is of type grpc.DialOption interface, where the - // implementation is provided by a function, it is not possible to compare. 
- if (c.Creds != nil) != (want.Creds != nil) { - return fmt.Errorf("config.Creds is %#v, want %#v", c.Creds, want.Creds) - } - if c.TransportAPI != want.TransportAPI { - return fmt.Errorf("config.TransportAPI is %v, want %v", c.TransportAPI, want.TransportAPI) - + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "google_default"}, + ServerFeatures: []string{"xds_v3"}, + }, + NodeProto: v3NodeProto, + ClientDefaultListenerResourceNameTemplate: "%s", } - if diff := cmp.Diff(want.NodeProto, c.NodeProto, cmp.Comparer(proto.Equal)); diff != "" { - return fmt.Errorf("config.NodeProto diff (-want, +got):\n%s", diff) + nonNilCredsConfigWithDeletionIgnored = &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "google_default"}, + IgnoreResourceDeletion: true, + ServerFeatures: []string{"ignore_resource_deletion", "xds_v3"}, + }, + NodeProto: v3NodeProto, + ClientDefaultListenerResourceNameTemplate: "%s", } - if c.ServerListenerResourceNameTemplate != want.ServerListenerResourceNameTemplate { - return fmt.Errorf("config.ServerListenerResourceNameTemplate is %q, want %q", c.ServerListenerResourceNameTemplate, want.ServerListenerResourceNameTemplate) + nonNilCredsConfigNoServerFeatures = &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "google_default"}, + }, + NodeProto: v3NodeProto, + ClientDefaultListenerResourceNameTemplate: "%s", } +) - // A vanilla cmp.Equal or cmp.Diff will not produce useful error message - // here. So, we iterate through the list of configs and compare them one at - // a time. 
- gotCfgs := c.CertProviderConfigs - wantCfgs := want.CertProviderConfigs - if len(gotCfgs) != len(wantCfgs) { - return fmt.Errorf("config.CertProviderConfigs is %d entries, want %d", len(gotCfgs), len(wantCfgs)) - } - for instance, gotCfg := range gotCfgs { - wantCfg, ok := wantCfgs[instance] - if !ok { - return fmt.Errorf("config.CertProviderConfigs has unexpected plugin instance %q with config %q", instance, gotCfg.String()) - } - if got, want := gotCfg.String(), wantCfg.String(); got != want { - return fmt.Errorf("config.CertProviderConfigs for plugin instance %q has config %q, want %q", instance, got, want) - } +func (c *Config) compare(want *Config) error { + if diff := cmp.Diff(want, c, + cmpopts.EquateEmpty(), + cmp.Comparer(proto.Equal), + cmp.Comparer(func(a, b grpc.DialOption) bool { return (a != nil) == (b != nil) }), + cmp.Transformer("certproviderconfigstring", func(a *certprovider.BuildableConfig) string { return a.String() }), + ); diff != "" { + return fmt.Errorf("unexpected diff in config (-want, +got):\n%s", diff) } return nil } @@ -285,9 +282,9 @@ func setupBootstrapOverride(bootstrapFileMap map[string]string) func() { // This function overrides the bootstrap file NAME env variable, to test the // code that reads file with the given fileName. func testNewConfigWithFileNameEnv(t *testing.T, fileName string, wantError bool, wantConfig *Config) { - origBootstrapFileName := env.BootstrapFileName - env.BootstrapFileName = fileName - defer func() { env.BootstrapFileName = origBootstrapFileName }() + origBootstrapFileName := envconfig.XDSBootstrapFileName + envconfig.XDSBootstrapFileName = fileName + defer func() { envconfig.XDSBootstrapFileName = origBootstrapFileName }() c, err := NewConfig() if (err != nil) != wantError { @@ -304,14 +301,14 @@ func testNewConfigWithFileNameEnv(t *testing.T, fileName string, wantError bool, // This function overrides the bootstrap file CONTENT env variable, to test the // code that uses the content from env directly. 
func testNewConfigWithFileContentEnv(t *testing.T, fileName string, wantError bool, wantConfig *Config) { + t.Helper() b, err := bootstrapFileReadFunc(fileName) if err != nil { - // If file reading failed, skip this test. - return + t.Skip(err) } - origBootstrapContent := env.BootstrapFileContent - env.BootstrapFileContent = string(b) - defer func() { env.BootstrapFileContent = origBootstrapContent }() + origBootstrapContent := envconfig.XDSBootstrapFileContent + envconfig.XDSBootstrapFileContent = string(b) + defer func() { envconfig.XDSBootstrapFileContent = origBootstrapContent }() c, err := NewConfig() if (err != nil) != wantError { @@ -325,9 +322,9 @@ func testNewConfigWithFileContentEnv(t *testing.T, fileName string, wantError bo } } -// TestNewConfigV2ProtoFailure exercises the functionality in NewConfig with +// TestNewConfigV3ProtoFailure exercises the functionality in NewConfig with // different bootstrap file contents which are expected to fail. -func TestNewConfigV2ProtoFailure(t *testing.T) { +func TestNewConfigV3ProtoFailure(t *testing.T) { bootstrapFileMap := map[string]string{ "empty": "", "badJSON": `["test": 123]`, @@ -391,11 +388,11 @@ func TestNewConfigV2ProtoFailure(t *testing.T) { } } -// TestNewConfigV2ProtoSuccess exercises the functionality in NewConfig with +// TestNewConfigV3ProtoSuccess exercises the functionality in NewConfig with // different bootstrap file contents. It overrides the fileReadFunc by returning // bootstrap file contents defined in this test, instead of reading from a file. 
-func TestNewConfigV2ProtoSuccess(t *testing.T) { - cancel := setupBootstrapOverride(v2BootstrapFileMap) +func TestNewConfigV3ProtoSuccess(t *testing.T) { + cancel := setupBootstrapOverride(v3BootstrapFileMap) defer cancel() tests := []struct { @@ -404,45 +401,25 @@ func TestNewConfigV2ProtoSuccess(t *testing.T) { }{ { "emptyNodeProto", &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: &v2corepb.Node{ - BuildVersion: gRPCVersion, + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "insecure"}, + }, + NodeProto: &v3corepb.Node{ UserAgentName: gRPCUserAgentName, - UserAgentVersionType: &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, - ClientFeatures: []string{clientFeatureNoOverprovisioning}, + UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}, + ClientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, }, + ClientDefaultListenerResourceNameTemplate: "%s", }, }, - {"unknownTopLevelFieldInFile", nilCredsConfigV2}, - {"unknownFieldInNodeProto", nilCredsConfigV2}, - {"unknownFieldInXdsServer", nilCredsConfigV2}, - {"multipleChannelCreds", nonNilCredsConfigV2}, - {"goodBootstrap", nonNilCredsConfigV2}, - {"multipleXDSServers", nonNilCredsConfigV2}, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testNewConfigWithFileNameEnv(t, test.name, false, test.wantConfig) - testNewConfigWithFileContentEnv(t, test.name, false, test.wantConfig) - }) - } -} - -// TestNewConfigV3Support verifies bootstrap functionality involving support for -// the xDS v3 transport protocol. Here the client ends up using v2 or v3 based -// on what the server supports. 
-func TestNewConfigV3Support(t *testing.T) { - cancel := setupBootstrapOverride(v3BootstrapFileMap) - defer cancel() - - tests := []struct { - name string - wantConfig *Config - }{ - {"serverDoesNotSupportsV3", nonNilCredsConfigV2}, - {"serverSupportsV3", nonNilCredsConfigV3}, + {"unknownTopLevelFieldInFile", nilCredsConfigNoServerFeatures}, + {"unknownFieldInNodeProto", nilCredsConfigNoServerFeatures}, + {"unknownFieldInXdsServer", nilCredsConfigNoServerFeatures}, + {"multipleChannelCreds", nonNilCredsConfigV3}, + {"goodBootstrap", nonNilCredsConfigV3}, + {"multipleXDSServers", nonNilCredsConfigV3}, + {"serverSupportsIgnoreResourceDeletion", nonNilCredsConfigWithDeletionIgnored}, } for _, test := range tests { @@ -456,29 +433,30 @@ func TestNewConfigV3Support(t *testing.T) { // TestNewConfigBootstrapEnvPriority tests that the two env variables are read // in correct priority. // -// the case where the bootstrap file -// environment variable is not set. +// "GRPC_XDS_BOOTSTRAP" which specifies the file name containing the bootstrap +// configuration takes precedence over "GRPC_XDS_BOOTSTRAP_CONFIG", which +// directly specifies the bootstrap configuration in itself. 
func TestNewConfigBootstrapEnvPriority(t *testing.T) { oldFileReadFunc := bootstrapFileReadFunc bootstrapFileReadFunc = func(filename string) ([]byte, error) { - return fileReadFromFileMap(v2BootstrapFileMap, filename) + return fileReadFromFileMap(v3BootstrapFileMap, filename) } defer func() { bootstrapFileReadFunc = oldFileReadFunc }() - goodFileName1 := "goodBootstrap" - goodConfig1 := nonNilCredsConfigV2 + goodFileName1 := "serverFeaturesIncludesXDSV3" + goodConfig1 := nonNilCredsConfigV3 - goodFileName2 := "serverSupportsV3" + goodFileName2 := "serverFeaturesExcludesXDSV3" goodFileContent2 := v3BootstrapFileMap[goodFileName2] - goodConfig2 := nonNilCredsConfigV3 + goodConfig2 := nonNilCredsConfigNoServerFeatures - origBootstrapFileName := env.BootstrapFileName - env.BootstrapFileName = "" - defer func() { env.BootstrapFileName = origBootstrapFileName }() + origBootstrapFileName := envconfig.XDSBootstrapFileName + envconfig.XDSBootstrapFileName = "" + defer func() { envconfig.XDSBootstrapFileName = origBootstrapFileName }() - origBootstrapContent := env.BootstrapFileContent - env.BootstrapFileContent = "" - defer func() { env.BootstrapFileContent = origBootstrapContent }() + origBootstrapContent := envconfig.XDSBootstrapFileContent + envconfig.XDSBootstrapFileContent = "" + defer func() { envconfig.XDSBootstrapFileContent = origBootstrapContent }() // When both env variables are empty, NewConfig should fail. if _, err := NewConfig(); err == nil { @@ -486,23 +464,35 @@ func TestNewConfigBootstrapEnvPriority(t *testing.T) { } // When one of them is set, it should be used. 
- env.BootstrapFileName = goodFileName1 - env.BootstrapFileContent = "" - if c, err := NewConfig(); err != nil || c.compare(goodConfig1) != nil { - t.Errorf("NewConfig() = %v, %v, want: %v, %v", c, err, goodConfig1, nil) + envconfig.XDSBootstrapFileName = goodFileName1 + envconfig.XDSBootstrapFileContent = "" + c, err := NewConfig() + if err != nil { + t.Errorf("NewConfig() failed: %v", err) + } + if err := c.compare(goodConfig1); err != nil { + t.Error(err) } - env.BootstrapFileName = "" - env.BootstrapFileContent = goodFileContent2 - if c, err := NewConfig(); err != nil || c.compare(goodConfig2) != nil { - t.Errorf("NewConfig() = %v, %v, want: %v, %v", c, err, goodConfig1, nil) + envconfig.XDSBootstrapFileName = "" + envconfig.XDSBootstrapFileContent = goodFileContent2 + c, err = NewConfig() + if err != nil { + t.Errorf("NewConfig() failed: %v", err) + } + if err := c.compare(goodConfig2); err != nil { + t.Error(err) } // Set both, file name should be read. - env.BootstrapFileName = goodFileName1 - env.BootstrapFileContent = goodFileContent2 - if c, err := NewConfig(); err != nil || c.compare(goodConfig1) != nil { - t.Errorf("NewConfig() = %v, %v, want: %v, %v", c, err, goodConfig1, nil) + envconfig.XDSBootstrapFileName = goodFileName1 + envconfig.XDSBootstrapFileContent = goodFileContent2 + c, err = NewConfig() + if err != nil { + t.Errorf("NewConfig() failed: %v", err) + } + if err := c.compare(goodConfig1); err != nil { + t.Error(err) } } @@ -589,7 +579,7 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { "channel_creds": [ { "type": "google_default" } ], - "server_features" : ["foo", "bar", "xds_v3"] + "server_features" : ["xds_v3"] }], "certificate_providers": { "unknownProviderInstance1": { @@ -615,7 +605,7 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { "channel_creds": [ { "type": "google_default" } ], - "server_features" : ["foo", "bar", "xds_v3"], + "server_features" : ["xds_v3"], }], "certificate_providers": { 
"unknownProviderInstance": { @@ -639,9 +629,9 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { "xds_servers" : [{ "server_uri": "trafficdirector.googleapis.com:443", "channel_creds": [ - { "type": "google_default" } + { "type": "insecure" } ], - "server_features" : ["foo", "bar", "xds_v3"] + "server_features" : ["xds_v3"] }], "certificate_providers": { "unknownProviderInstance": { @@ -669,14 +659,24 @@ func TestNewConfigWithCertificateProviders(t *testing.T) { cancel := setupBootstrapOverride(bootstrapFileMap) defer cancel() + // Cannot use xdstestutils.ServerConfigForAddress here, as it would lead to + // a cyclic dependency. + jsonCfg := `{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [{"type": "insecure"}], + "server_features": ["xds_v3"] + }` + serverCfg, err := ServerConfigFromJSON([]byte(jsonCfg)) + if err != nil { + t.Fatalf("Failed to create server config from JSON %s: %v", jsonCfg, err) + } goodConfig := &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - TransportAPI: version.TransportV3, - NodeProto: v3NodeProto, + XDSServer: serverCfg, + NodeProto: v3NodeProto, CertProviderConfigs: map[string]*certprovider.BuildableConfig{ "fakeProviderInstance": wantCfg, }, + ClientDefaultListenerResourceNameTemplate: "%s", } tests := []struct { name string @@ -760,11 +760,13 @@ func TestNewConfigWithServerListenerResourceNameTemplate(t *testing.T) { { name: "goodServerListenerResourceNameTemplate", wantConfig: &Config{ - BalancerName: "trafficdirector.googleapis.com:443", - Creds: grpc.WithCredentialsBundle(google.NewComputeEngineCredentials()), - TransportAPI: version.TransportV2, - NodeProto: v2NodeProto, - ServerListenerResourceNameTemplate: "grpc/server?xds.resource.listening_address=%s", + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "google_default"}, + }, + NodeProto: 
v3NodeProto, + ServerListenerResourceNameTemplate: "grpc/server?xds.resource.listening_address=%s", + ClientDefaultListenerResourceNameTemplate: "%s", }, }, } @@ -776,3 +778,264 @@ func TestNewConfigWithServerListenerResourceNameTemplate(t *testing.T) { }) } } + +func TestNewConfigWithFederation(t *testing.T) { + cancel := setupBootstrapOverride(map[string]string{ + "badClientListenerResourceNameTemplate": ` + { + "node": { "id": "ENVOY_NODE_ID" }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443" + }], + "client_default_listener_resource_name_template": 123456789 + }`, + "badClientListenerResourceNameTemplatePerAuthority": ` + { + "node": { "id": "ENVOY_NODE_ID" }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }], + "authorities": { + "xds.td.com": { + "client_listener_resource_name_template": "some/template/%s", + "xds_servers": [{ + "server_uri": "td.com", + "channel_creds": [ { "type": "google_default" } ], + "server_features" : ["foo", "bar", "xds_v3"] + }] + } + } + }`, + "good": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }], + "server_listener_resource_name_template": "xdstp://xds.example.com/envoy.config.listener.v3.Listener/grpc/server?listening_address=%s", + "client_default_listener_resource_name_template": "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + "authorities": { + "xds.td.com": { + "client_listener_resource_name_template": "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", + "xds_servers": [{ + "server_uri": "td.com", + "channel_creds": [ { "type": "google_default" } ], + "server_features" : ["xds_v3"] + }] + } + } + }`, + // If client_default_listener_resource_name_template is not set, it + // defaults to "%s". 
+ "goodWithDefaultDefaultClientListenerTemplate": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }] + }`, + // If client_listener_resource_name_template in authority is not set, it + // defaults to + // "xdstp:///envoy.config.listener.v3.Listener/%s". + "goodWithDefaultClientListenerTemplatePerAuthority": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }], + "client_default_listener_resource_name_template": "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + "authorities": { + "xds.td.com": { }, + "#.com": { } + } + }`, + // It's OK for an authority to not have servers. The top-level server + // will be used. 
+ "goodWithNoServerPerAuthority": ` + { + "node": { + "id": "ENVOY_NODE_ID", + "metadata": { + "TRAFFICDIRECTOR_GRPC_HOSTNAME": "trafficdirector" + } + }, + "xds_servers" : [{ + "server_uri": "trafficdirector.googleapis.com:443", + "channel_creds": [ { "type": "google_default" } ] + }], + "client_default_listener_resource_name_template": "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + "authorities": { + "xds.td.com": { + "client_listener_resource_name_template": "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s" + } + } + }`, + }) + defer cancel() + + tests := []struct { + name string + wantConfig *Config + wantErr bool + }{ + { + name: "badClientListenerResourceNameTemplate", + wantErr: true, + }, + { + name: "badClientListenerResourceNameTemplatePerAuthority", + wantErr: true, + }, + { + name: "good", + wantConfig: &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "google_default"}, + }, + NodeProto: v3NodeProto, + ServerListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/grpc/server?listening_address=%s", + ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + Authorities: map[string]*Authority{ + "xds.td.com": { + ClientListenerResourceNameTemplate: "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", + XDSServer: &ServerConfig{ + ServerURI: "td.com", + Creds: ChannelCreds{Type: "google_default"}, + ServerFeatures: []string{"xds_v3"}, + }, + }, + }, + }, + }, + { + name: "goodWithDefaultDefaultClientListenerTemplate", + wantConfig: &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "google_default"}, + }, + NodeProto: v3NodeProto, + ClientDefaultListenerResourceNameTemplate: "%s", + }, + }, + { + name: "goodWithDefaultClientListenerTemplatePerAuthority", + wantConfig: &Config{ + XDSServer: &ServerConfig{ + 
ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "google_default"}, + }, + NodeProto: v3NodeProto, + ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + Authorities: map[string]*Authority{ + "xds.td.com": { + ClientListenerResourceNameTemplate: "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", + }, + "#.com": { + ClientListenerResourceNameTemplate: "xdstp://%23.com/envoy.config.listener.v3.Listener/%s", + }, + }, + }, + }, + { + name: "goodWithNoServerPerAuthority", + wantConfig: &Config{ + XDSServer: &ServerConfig{ + ServerURI: "trafficdirector.googleapis.com:443", + Creds: ChannelCreds{Type: "google_default"}, + }, + NodeProto: v3NodeProto, + ClientDefaultListenerResourceNameTemplate: "xdstp://xds.example.com/envoy.config.listener.v3.Listener/%s", + Authorities: map[string]*Authority{ + "xds.td.com": { + ClientListenerResourceNameTemplate: "xdstp://xds.td.com/envoy.config.listener.v3.Listener/%s", + }, + }, + }, + }, + } + + oldFederationSupport := envconfig.XDSFederation + envconfig.XDSFederation = true + defer func() { envconfig.XDSFederation = oldFederationSupport }() + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testNewConfigWithFileNameEnv(t, test.name, test.wantErr, test.wantConfig) + testNewConfigWithFileContentEnv(t, test.name, test.wantErr, test.wantConfig) + }) + } +} + +func TestServerConfigMarshalAndUnmarshal(t *testing.T) { + jsonCfg := `{ + "server_uri": "test-server", + "channel_creds": [{"type": "insecure"}], + "server_features": ["xds_v3"] + }` + origConfig, err := ServerConfigFromJSON([]byte(jsonCfg)) + if err != nil { + t.Fatalf("Failed to create server config from JSON %s: %v", jsonCfg, err) + } + bs, err := json.Marshal(origConfig) + if err != nil { + t.Fatalf("failed to marshal: %v", err) + } + + unmarshaledConfig := new(ServerConfig) + if err := json.Unmarshal(bs, unmarshaledConfig); err != nil { + t.Fatalf("failed to 
unmarshal: %v", err) + } + if diff := cmp.Diff(origConfig, unmarshaledConfig); diff != "" { + t.Fatalf("Unexpected diff in server config (-want, +got):\n%s", diff) + } +} + +func TestDefaultBundles(t *testing.T) { + if c := bootstrap.GetCredentials("google_default"); c == nil { + t.Errorf(`bootstrap.GetCredentials("google_default") credential is nil, want non-nil`) + } + + if c := bootstrap.GetCredentials("insecure"); c == nil { + t.Errorf(`bootstrap.GetCredentials("insecure") credential is nil, want non-nil`) + } +} + +func TestCredsBuilders(t *testing.T) { + b := &googleDefaultCredsBuilder{} + if _, err := b.Build(nil); err != nil { + t.Errorf("googleDefaultCredsBuilder.Build failed: %v", err) + } + if got, want := b.Name(), "google_default"; got != want { + t.Errorf("googleDefaultCredsBuilder.Name = %v, want %v", got, want) + } + + i := &insecureCredsBuilder{} + if _, err := i.Build(nil); err != nil { + t.Errorf("insecureCredsBuilder.Build failed: %v", err) + } + + if got, want := i.Name(), "insecure"; got != want { + t.Errorf("insecureCredsBuilder.Name = %v, want %v", got, want) + } +} diff --git a/xds/internal/client/bootstrap/logging.go b/xds/internal/xdsclient/bootstrap/logging.go similarity index 100% rename from xds/internal/client/bootstrap/logging.go rename to xds/internal/xdsclient/bootstrap/logging.go diff --git a/xds/internal/xdsclient/bootstrap/template.go b/xds/internal/xdsclient/bootstrap/template.go new file mode 100644 index 000000000000..9b51fcc83972 --- /dev/null +++ b/xds/internal/xdsclient/bootstrap/template.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package bootstrap + +import ( + "net/url" + "strings" +) + +// PopulateResourceTemplate populates the given template using the target +// string. "%s", if exists in the template, will be replaced with target. +// +// If the template starts with "xdstp:", the replaced string will be %-encoded. +// But note that "/" is not percent encoded. +func PopulateResourceTemplate(template, target string) string { + if !strings.Contains(template, "%s") { + return template + } + if strings.HasPrefix(template, "xdstp:") { + target = percentEncode(target) + } + return strings.Replace(template, "%s", target, -1) +} + +// percentEncode percent encode t, except for "/". See the tests for examples. +func percentEncode(t string) string { + segs := strings.Split(t, "/") + for i := range segs { + segs[i] = url.PathEscape(segs[i]) + } + return strings.Join(segs, "/") +} diff --git a/xds/internal/xdsclient/bootstrap/template_test.go b/xds/internal/xdsclient/bootstrap/template_test.go new file mode 100644 index 000000000000..bc12eb42991f --- /dev/null +++ b/xds/internal/xdsclient/bootstrap/template_test.go @@ -0,0 +1,97 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package bootstrap + +import "testing" + +func Test_percentEncode(t *testing.T) { + tests := []struct { + name string + target string + want string + }{ + { + name: "normal name", + target: "server.example.com", + want: "server.example.com", + }, + { + name: "ipv4", + target: "0.0.0.0:8080", + want: "0.0.0.0:8080", + }, + { + name: "ipv6", + target: "[::1]:8080", + want: "%5B::1%5D:8080", // [ and ] are percent encoded. + }, + { + name: "/ should not be percent encoded", + target: "my/service/region", + want: "my/service/region", // "/"s are kept. + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := percentEncode(tt.target); got != tt.want { + t.Errorf("percentEncode() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPopulateResourceTemplate(t *testing.T) { + tests := []struct { + name string + template string + target string + want string + }{ + { + name: "no %s", + template: "/name/template", + target: "[::1]:8080", + want: "/name/template", + }, + { + name: "with %s, no xdstp: prefix, ipv6", + template: "/name/template/%s", + target: "[::1]:8080", + want: "/name/template/[::1]:8080", + }, + { + name: "with %s, with xdstp: prefix", + template: "xdstp://authority.com/%s", + target: "0.0.0.0:8080", + want: "xdstp://authority.com/0.0.0.0:8080", + }, + { + name: "with %s, with xdstp: prefix, and ipv6", + template: "xdstp://authority.com/%s", + target: "[::1]:8080", + want: "xdstp://authority.com/%5B::1%5D:8080", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := 
PopulateResourceTemplate(tt.template, tt.target); got != tt.want { + t.Errorf("PopulateResourceTemplate() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/xds/internal/xdsclient/client.go b/xds/internal/xdsclient/client.go new file mode 100644 index 000000000000..44f6d3bc0a1c --- /dev/null +++ b/xds/internal/xdsclient/client.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xdsclient implements a full fledged gRPC client for the xDS API used +// by the xds resolver and balancer implementations. +package xdsclient + +import ( + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// XDSClient is a full fledged gRPC client which queries a set of discovery APIs +// (collectively termed as xDS) on a remote management server, to discover +// various dynamic resources. +type XDSClient interface { + WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() + WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() + WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() + + // WatchResource uses xDS to discover the resource associated with the + // provided resource name. The resource type implementation determines how + // xDS requests are sent out and how responses are deserialized and + // validated. 
Upon receipt of a response from the management server, an + // appropriate callback on the watcher is invoked. + // + // Most callers will not have a need to use this API directly. They will + // instead use a resource-type-specific wrapper API provided by the relevant + // resource type implementation. + // + // + // During a race (e.g. an xDS response is received while the user is calling + // cancel()), there's a small window where the callback can be called after + // the watcher is canceled. Callers need to handle this case. + // + // TODO: Once this generic client API is fully implemented and integrated, + // delete the resource type specific watch APIs on this interface. + WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) + + // DumpResources returns the status of the xDS resources. Returns a map of + // resource type URLs to a map of resource names to resource state. + DumpResources() map[string]map[string]xdsresource.UpdateWithMD + + ReportLoad(*bootstrap.ServerConfig) (*load.Store, func()) + + BootstrapConfig() *bootstrap.Config +} diff --git a/xds/internal/xdsclient/client_new.go b/xds/internal/xdsclient/client_new.go new file mode 100644 index 000000000000..b330c19dfd17 --- /dev/null +++ b/xds/internal/xdsclient/client_new.go @@ -0,0 +1,166 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsclient + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "sync" + "time" + + "google.golang.org/grpc/internal/cache" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" +) + +// New returns a new xDS client configured by the bootstrap file specified in env +// variable GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG. +// +// The returned client is a reference counted singleton instance. This function +// creates a new client only when one doesn't already exist. +// +// The second return value represents a close function which releases the +// caller's reference on the returned client. The caller is expected to invoke +// it once they are done using the client. The underlying client will be closed +// only when all references are released, and it is safe for the caller to +// invoke this close function multiple times. +func New() (XDSClient, func(), error) { + return newRefCountedWithConfig(nil) +} + +// NewWithConfig returns a new xDS client configured by the given config. +// +// The second return value represents a close function which releases the +// caller's reference on the returned client. The caller is expected to invoke +// it once they are done using the client. The underlying client will be closed +// only when all references are released, and it is safe for the caller to +// invoke this close function multiple times. +// +// # Internal/Testing Only +// +// This function should ONLY be used for internal (c2p resolver) and/or testing +// purposese. DO NOT use this elsewhere. Use New() instead. +func NewWithConfig(config *bootstrap.Config) (XDSClient, func(), error) { + return newRefCountedWithConfig(config) +} + +// newWithConfig returns a new xdsClient with the given config. 
+func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { + ctx, cancel := context.WithCancel(context.Background()) + c := &clientImpl{ + done: grpcsync.NewEvent(), + config: config, + watchExpiryTimeout: watchExpiryTimeout, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerClose: cancel, + resourceTypes: newResourceTypeRegistry(), + authorities: make(map[string]*authority), + idleAuthorities: cache.NewTimeoutCache(idleAuthorityDeleteTimeout), + } + + c.logger = prefixLogger(c) + c.logger.Infof("Created client to xDS management server: %s", config.XDSServer) + return c, nil +} + +// NewWithConfigForTesting returns an xDS client for the specified bootstrap +// config, separate from the global singleton. +// +// The second return value represents a close function which the caller is +// expected to invoke once they are done using the client. It is safe for the +// caller to invoke this close function multiple times. +// +// # Testing Only +// +// This function should ONLY be used for testing purposes. +// TODO(easwars): Document the new close func. +func NewWithConfigForTesting(config *bootstrap.Config, watchExpiryTimeout, authorityIdleTimeout time.Duration) (XDSClient, func(), error) { + cl, err := newWithConfig(config, watchExpiryTimeout, authorityIdleTimeout) + if err != nil { + return nil, nil, err + } + return cl, grpcsync.OnceFunc(cl.close), nil +} + +// NewWithBootstrapContentsForTesting returns an xDS client for this config, +// separate from the global singleton. +// +// The second return value represents a close function which the caller is +// expected to invoke once they are done using the client. It is safe for the +// caller to invoke this close function multiple times. +// +// # Testing Only +// +// This function should ONLY be used for testing purposes. 
+func NewWithBootstrapContentsForTesting(contents []byte) (XDSClient, func(), error) { + // Normalize the contents + buf := bytes.Buffer{} + err := json.Indent(&buf, contents, "", "") + if err != nil { + return nil, nil, fmt.Errorf("xds: error normalizing JSON: %v", err) + } + contents = bytes.TrimSpace(buf.Bytes()) + + c, err := getOrMakeClientForTesting(contents) + if err != nil { + return nil, nil, err + } + return c, grpcsync.OnceFunc(func() { + clientsMu.Lock() + defer clientsMu.Unlock() + if c.decrRef() == 0 { + c.close() + delete(clients, string(contents)) + } + }), nil +} + +// getOrMakeClientForTesting creates a new reference counted client (separate +// from the global singleton) for the given config, or returns an existing one. +// It takes care of incrementing the reference count for the returned client, +// and leaves the caller responsible for decrementing the reference count once +// the client is no longer needed. +func getOrMakeClientForTesting(config []byte) (*clientRefCounted, error) { + clientsMu.Lock() + defer clientsMu.Unlock() + + if c := clients[string(config)]; c != nil { + c.incrRef() + return c, nil + } + + bcfg, err := bootstrap.NewConfigFromContentsForTesting(config) + if err != nil { + return nil, fmt.Errorf("bootstrap config %s: %v", string(config), err) + } + cImpl, err := newWithConfig(bcfg, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + return nil, fmt.Errorf("creating xDS client: %v", err) + } + c := &clientRefCounted{clientImpl: cImpl, refCount: 1} + clients[string(config)] = c + return c, nil +} + +var ( + clients = map[string]*clientRefCounted{} + clientsMu sync.Mutex +) diff --git a/credentials/go12.go b/xds/internal/xdsclient/client_test.go similarity index 60% rename from credentials/go12.go rename to xds/internal/xdsclient/client_test.go index ccbf35b33125..f32688850e4f 100644 --- a/credentials/go12.go +++ b/xds/internal/xdsclient/client_test.go @@ -1,5 +1,3 @@ -// +build go1.12 - /* * * 
Copyright 2019 gRPC authors. @@ -18,13 +16,25 @@ * */ -package credentials +package xdsclient + +import ( + "testing" + "time" -import "crypto/tls" + "google.golang.org/grpc/internal/grpctest" +) -// This init function adds cipher suite constants only defined in Go 1.12. -func init() { - cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" - cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" - cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +type s struct { + grpctest.Tester } + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + defaultTestWatchExpiryTimeout = 100 * time.Millisecond + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. +) diff --git a/xds/internal/xdsclient/clientimpl.go b/xds/internal/xdsclient/clientimpl.go new file mode 100644 index 000000000000..2c05ea66f5f9 --- /dev/null +++ b/xds/internal/xdsclient/clientimpl.go @@ -0,0 +1,89 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 *
 */

package xdsclient

import (
	"sync"
	"time"

	"google.golang.org/grpc/internal/cache"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
)

// Interface guard: clientImpl must implement XDSClient.
var _ XDSClient = &clientImpl{}

// clientImpl is the real implementation of the xds client. The exported Client
// is a wrapper of this struct with a ref count.
type clientImpl struct {
	done               *grpcsync.Event
	config             *bootstrap.Config
	logger             *grpclog.PrefixLogger
	watchExpiryTimeout time.Duration
	serializer         *grpcsync.CallbackSerializer
	serializerClose    func()
	resourceTypes      *resourceTypeRegistry

	// authorityMu protects the authority fields. It's necessary because an
	// authority is created when it's used.
	authorityMu sync.Mutex
	// authorities is a map from ServerConfig to authority. So that
	// different authorities sharing the same ServerConfig can share the
	// authority.
	//
	// The key is **ServerConfig.String()**, not the authority name.
	//
	// An authority is either in authorities, or idleAuthorities,
	// never both.
	authorities map[string]*authority
	// idleAuthorities keeps the authorities that are not used (the last
	// watch on it was canceled). They are kept in the cache and will be deleted
	// after a timeout. The key is ServerConfig.String().
	//
	// An authority is either in authorities, or idleAuthorities,
	// never both.
	idleAuthorities *cache.TimeoutCache
}

// BootstrapConfig returns the configuration read from the bootstrap file.
// Callers must treat the return value as read-only.
func (c *clientImpl) BootstrapConfig() *bootstrap.Config {
	return c.config
}

// close closes the gRPC connection to the management server.
//
// It is safe to call close multiple times; the done event guard makes all
// calls after the first no-ops.
func (c *clientImpl) close() {
	if c.done.HasFired() {
		return
	}
	c.done.Fire()
	// TODO: Should we invoke the registered callbacks here with an error that
	// the client is closed?

	// Close every authority, both in-use and idle.
	c.authorityMu.Lock()
	for _, a := range c.authorities {
		a.close()
	}
	c.idleAuthorities.Clear(true)
	c.authorityMu.Unlock()
	// Stops the callback serializer by canceling its context.
	c.serializerClose()

	c.logger.Infof("Shutdown")
}

/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package xdsclient

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

// findAuthority returns the authority for this name. If it doesn't already
// exist, one will be created.
//
// Note that this doesn't always create new authority. authorities with the same
// config but different names are shared.
//
// The returned unref function must be called when the caller is done using this
// authority, without holding c.authorityMu.
//
// Caller must not hold c.authorityMu.
func (c *clientImpl) findAuthority(n *xdsresource.Name) (_ *authority, unref func(), _ error) {
	scheme, authority := n.Scheme, n.Authority

	c.authorityMu.Lock()
	defer c.authorityMu.Unlock()
	if c.done.HasFired() {
		return nil, nil, errors.New("the xds-client is closed")
	}

	// Default to the top-level server config; a federation authority may
	// override it with its own server config below.
	config := c.config.XDSServer
	if scheme == xdsresource.FederationScheme {
		cfg, ok := c.config.Authorities[authority]
		if !ok {
			return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority)
		}
		if cfg.XDSServer != nil {
			config = cfg.XDSServer
		}
	}

	a, err := c.newAuthorityLocked(config)
	if err != nil {
		return nil, nil, fmt.Errorf("xds: failed to connect to the control plane for authority %q: %v", authority, err)
	}
	// All returned authority from this function will be used by a watch,
	// holding the ref here.
	//
	// Note that this must be done while c.authorityMu is held, to avoid the
	// race that an authority is returned, but before the watch starts, the
	// old last watch is canceled (in another goroutine), causing this
	// authority to be removed, and then a watch will start on a removed
	// authority.
	//
	// unref() will be done when the watch is canceled.
	a.refLocked()
	return a, func() { c.unrefAuthority(a) }, nil
}

// newAuthorityLocked creates a new authority for the given config. If an
// authority for the given config exists in the cache, it is returned instead of
// creating a new one.
//
// The caller must take a reference of the returned authority before using, and
// unref afterwards.
//
// caller must hold c.authorityMu
func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *authority, retErr error) {
	// First check if there's already an authority for this config. If found, it
	// means this authority is used by other watches (could be the same
	// authority name, or a different authority name but the same server
	// config). Return it.
	configStr := config.String()
	if a, ok := c.authorities[configStr]; ok {
		return a, nil
	}
	// Second check if there's an authority in the idle cache. If found, it
	// means this authority was created, but moved to the idle cache because the
	// watch was canceled. Move it from idle cache to the authority cache, and
	// return.
	if old, ok := c.idleAuthorities.Remove(configStr); ok {
		oldA, _ := old.(*authority)
		if oldA != nil {
			c.authorities[configStr] = oldA
			return oldA, nil
		}
	}

	// Make a new authority since there's no existing authority for this config.
	ret, err := newAuthority(authorityArgs{
		serverCfg:          config,
		bootstrapCfg:       c.config,
		serializer:         c.serializer,
		resourceTypeGetter: c.resourceTypes.get,
		watchExpiryTimeout: c.watchExpiryTimeout,
		logger:             grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI)),
	})
	if err != nil {
		return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err)
	}
	// Add it to the cache, so it will be reused.
	c.authorities[configStr] = ret
	return ret, nil
}

// unrefAuthority unrefs the authority. It also moves the authority to idle
// cache if its ref count is 0.
//
// This function doesn't need to be called explicitly. It's called by the
// returned unref from findAuthority().
//
// Caller must not hold c.authorityMu.
func (c *clientImpl) unrefAuthority(a *authority) {
	c.authorityMu.Lock()
	defer c.authorityMu.Unlock()
	if a.unrefLocked() > 0 {
		return
	}
	configStr := a.serverCfg.String()
	delete(c.authorities, configStr)
	// The idle cache closes the authority when the idle timeout fires.
	c.idleAuthorities.Add(configStr, a, func() {
		a.close()
	})
}

/*
 *
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package xdsclient

import (
	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

// appendMaps copies all entries from src into dst, creating the per-resource-
// type inner maps in dst as needed. An entry in dst with the same resource
// type and name as one in src is overwritten.
func appendMaps(dst, src map[string]map[string]xdsresource.UpdateWithMD) {
	// Iterate through the resource types.
	for rType, srcResources := range src {
		// Lookup/create the resource type specific map in the destination.
		dstResources := dst[rType]
		if dstResources == nil {
			dstResources = make(map[string]xdsresource.UpdateWithMD)
			dst[rType] = dstResources
		}

		// Iterate through the resources within the resource type in the source,
		// and copy them over to the destination.
		for name, update := range srcResources {
			dstResources[name] = update
		}
	}
}

// DumpResources returns the status and contents of all xDS resources.
func (c *clientImpl) DumpResources() map[string]map[string]xdsresource.UpdateWithMD {
	c.authorityMu.Lock()
	defer c.authorityMu.Unlock()
	dumps := make(map[string]map[string]xdsresource.UpdateWithMD)
	// Merge the per-authority dumps into a single map.
	for _, a := range c.authorities {
		dump := a.dumpResources()
		appendMaps(dumps, dump)
	}
	return dumps
}

/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package xdsclient

import (
	"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
	"google.golang.org/grpc/xds/internal/xdsclient/load"
)

// ReportLoad starts a load reporting stream to the given server. All load
// reports to the same server share the LRS stream.
//
// It returns a Store for the user to report loads, a function to cancel the
// load reporting stream.
func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, func()) {
	c.authorityMu.Lock()
	a, err := c.newAuthorityLocked(server)
	if err != nil {
		c.authorityMu.Unlock()
		c.logger.Infof("xds: failed to connect to the control plane to do load reporting for authority %q: %v", server, err)
		return nil, func() {}
	}
	// Hold the ref before starting load reporting.
	a.refLocked()
	c.authorityMu.Unlock()

	store, cancelF := a.reportLoad()
	return store, func() {
		cancelF()
		// Releasing the ref taken above may move the authority to the idle
		// cache if no other watches/reports hold a ref.
		c.unrefAuthority(a)
	}
}

/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package xdsclient

import (
	"context"
	"fmt"
	"sync"

	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
)

// This is only required temporarily, while we modify the
// clientImpl.WatchListener API to be implemented via the wrapper
// WatchListener() API which calls the WatchResource() API.
type listenerWatcher struct {
	resourceName string
	cb           func(xdsresource.ListenerUpdate, error)
}

// OnUpdate delivers a new listener resource to the legacy callback.
func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData) {
	l.cb(update.Resource, nil)
}

// OnError delivers a watch error to the legacy callback, with a zero update.
func (l *listenerWatcher) OnError(err error) {
	l.cb(xdsresource.ListenerUpdate{}, err)
}

// OnResourceDoesNotExist translates resource-not-found into the error form
// expected by the legacy callback.
func (l *listenerWatcher) OnResourceDoesNotExist() {
	err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Listener not found in received response", l.resourceName)
	l.cb(xdsresource.ListenerUpdate{}, err)
}

// WatchListener uses LDS to discover information about the Listener resource
// identified by resourceName.
func (c *clientImpl) WatchListener(resourceName string, cb func(xdsresource.ListenerUpdate, error)) (cancel func()) {
	watcher := &listenerWatcher{resourceName: resourceName, cb: cb}
	return xdsresource.WatchListener(c, resourceName, watcher)
}

// This is only required temporarily, while we modify the
// clientImpl.WatchRouteConfig API to be implemented via the wrapper
// WatchRouteConfig() API which calls the WatchResource() API.
type routeConfigWatcher struct {
	resourceName string
	cb           func(xdsresource.RouteConfigUpdate, error)
}

// OnUpdate delivers a new route config resource to the legacy callback.
func (r *routeConfigWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) {
	r.cb(update.Resource, nil)
}

// OnError delivers a watch error to the legacy callback, with a zero update.
func (r *routeConfigWatcher) OnError(err error) {
	r.cb(xdsresource.RouteConfigUpdate{}, err)
}

// OnResourceDoesNotExist translates resource-not-found into the error form
// expected by the legacy callback.
func (r *routeConfigWatcher) OnResourceDoesNotExist() {
	err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type RouteConfiguration not found in received response", r.resourceName)
	r.cb(xdsresource.RouteConfigUpdate{}, err)
}

// WatchRouteConfig uses RDS to discover information about the
// RouteConfiguration resource identified by resourceName.
func (c *clientImpl) WatchRouteConfig(resourceName string, cb func(xdsresource.RouteConfigUpdate, error)) (cancel func()) {
	watcher := &routeConfigWatcher{resourceName: resourceName, cb: cb}
	return xdsresource.WatchRouteConfig(c, resourceName, watcher)
}

// This is only required temporarily, while we modify the
// clientImpl.WatchCluster API to be implemented via the wrapper WatchCluster()
// API which calls the WatchResource() API.
type clusterWatcher struct {
	resourceName string
	cb           func(xdsresource.ClusterUpdate, error)
}

// OnUpdate delivers a new cluster resource to the legacy callback.
func (c *clusterWatcher) OnUpdate(update *xdsresource.ClusterResourceData) {
	c.cb(update.Resource, nil)
}

// OnError delivers a watch error to the legacy callback, with a zero update.
func (c *clusterWatcher) OnError(err error) {
	c.cb(xdsresource.ClusterUpdate{}, err)
}

// OnResourceDoesNotExist translates resource-not-found into the error form
// expected by the legacy callback.
func (c *clusterWatcher) OnResourceDoesNotExist() {
	err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Cluster not found in received response", c.resourceName)
	c.cb(xdsresource.ClusterUpdate{}, err)
}

// WatchCluster uses CDS to discover information about the Cluster resource
// identified by resourceName.
//
// WatchCluster can be called multiple times, with same or different
// clusterNames. Each call will start an independent watcher for the resource.
+func (c *clientImpl) WatchCluster(resourceName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { + watcher := &clusterWatcher{resourceName: resourceName, cb: cb} + return xdsresource.WatchCluster(c, resourceName, watcher) +} + +// WatchResource uses xDS to discover the resource associated with the provided +// resource name. The resource type implementation determines how xDS requests +// are sent out and how responses are deserialized and validated. Upon receipt +// of a response from the management server, an appropriate callback on the +// watcher is invoked. +func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) { + // Return early if the client is already closed. + // + // The client returned from the top-level API is a ref-counted client which + // contains a pointer to `clientImpl`. When all references are released, the + // ref-counted client sets its pointer to `nil`. And if any watch APIs are + // made on such a closed client, we will get here with a `nil` receiver. + if c == nil || c.done.HasFired() { + logger.Warningf("Watch registered for name %q of type %q, but client is closed", rType.TypeName(), resourceName) + return func() {} + } + + if err := c.resourceTypes.maybeRegister(rType); err != nil { + logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeName(), resourceName) + c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + return func() {} + } + + // TODO: replace this with the code does the following when we have + // implemented generic watch API on the authority: + // - Parse the resource name and extract the authority. + // - Locate the corresponding authority object and acquire a reference to + // it. If the authority is not found, error out. + // - Call the watchResource() method on the authority. 
+ // - Return a cancel function to cancel the watch on the authority and to + // release the reference. + + // TODO: Make ParseName return an error if parsing fails, and + // schedule the OnError callback in that case. + n := xdsresource.ParseName(resourceName) + a, unref, err := c.findAuthority(n) + if err != nil { + logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority) + c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + return func() {} + } + cancelF := a.watchResource(rType, n.String(), watcher) + return func() { + cancelF() + unref() + } +} + +// A registry of xdsresource.Type implementations indexed by their corresponding +// type URLs. Registration of an xdsresource.Type happens the first time a watch +// for a resource of that type is invoked. +type resourceTypeRegistry struct { + mu sync.Mutex + types map[string]xdsresource.Type +} + +func newResourceTypeRegistry() *resourceTypeRegistry { + return &resourceTypeRegistry{types: make(map[string]xdsresource.Type)} +} + +func (r *resourceTypeRegistry) get(url string) xdsresource.Type { + r.mu.Lock() + defer r.mu.Unlock() + return r.types[url] +} + +func (r *resourceTypeRegistry) maybeRegister(rType xdsresource.Type) error { + r.mu.Lock() + defer r.mu.Unlock() + + url := rType.TypeURL() + typ, ok := r.types[url] + if ok && typ != rType { + return fmt.Errorf("attempt to re-register a resource type implementation for %v", rType.TypeName()) + } + r.types[url] = rType + return nil +} diff --git a/xds/internal/client/load/reporter.go b/xds/internal/xdsclient/load/reporter.go similarity index 100% rename from xds/internal/client/load/reporter.go rename to xds/internal/xdsclient/load/reporter.go diff --git a/xds/internal/client/load/store.go b/xds/internal/xdsclient/load/store.go similarity index 100% rename from xds/internal/client/load/store.go rename to xds/internal/xdsclient/load/store.go diff --git 
a/xds/internal/client/load/store_test.go b/xds/internal/xdsclient/load/store_test.go similarity index 100% rename from xds/internal/client/load/store_test.go rename to xds/internal/xdsclient/load/store_test.go diff --git a/xds/internal/client/tests/loadreport_test.go b/xds/internal/xdsclient/loadreport_test.go similarity index 66% rename from xds/internal/client/tests/loadreport_test.go rename to xds/internal/xdsclient/loadreport_test.go index af145e7f2a92..06e58acdd2dc 100644 --- a/xds/internal/client/tests/loadreport_test.go +++ b/xds/internal/xdsclient/loadreport_test.go @@ -16,77 +16,73 @@ * */ -package tests_test +package xdsclient import ( "context" "testing" "time" - v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" - endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint" - lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v2" - durationpb "github.com/golang/protobuf/ptypes/duration" "github.com/google/go-cmp/cmp" - "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" "google.golang.org/grpc/status" - "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" - "google.golang.org/grpc/xds/internal/testutils/fakeserver" - "google.golang.org/grpc/xds/internal/version" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" "google.golang.org/protobuf/testing/protocmp" - _ "google.golang.org/grpc/xds/internal/client/v2" // Register the v2 xDS API client. 
+ v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + durationpb "github.com/golang/protobuf/ptypes/duration" ) const ( - defaultTestTimeout = 5 * time.Second - defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. defaultClientWatchExpiryTimeout = 15 * time.Second ) func (s) TestLRSClient(t *testing.T) { - fs, sCleanup, err := fakeserver.StartServer() + fs1, sCleanup, err := fakeserver.StartServer(nil) if err != nil { t.Fatalf("failed to start fake xDS server: %v", err) } defer sCleanup() - xdsC, err := client.NewWithConfigForTesting(&bootstrap.Config{ - BalancerName: fs.Address, - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), - NodeProto: &v2corepb.Node{}, - TransportAPI: version.TransportV2, - }, defaultClientWatchExpiryTimeout) + serverCfg1 := xdstestutils.ServerConfigForAddress(t, fs1.Address) + xdsC, close, err := NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: serverCfg1, + NodeProto: &v3corepb.Node{}, + }, defaultClientWatchExpiryTimeout, time.Duration(0)) if err != nil { t.Fatalf("failed to create xds client: %v", err) } - defer xdsC.Close() + defer close() + + // Report to the same address should not create new ClientConn. + store1, lrsCancel1 := xdsC.ReportLoad(serverCfg1) + defer lrsCancel1() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) defer cancel() - if u, err := fs.NewConnChan.Receive(ctx); err != nil { + if u, err := fs1.NewConnChan.Receive(ctx); err != nil { t.Errorf("unexpected timeout: %v, %v, want NewConn", u, err) } - // Report to the same address should not create new ClientConn. 
- store1, lrsCancel1 := xdsC.ReportLoad(fs.Address) - defer lrsCancel1() sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() - if u, err := fs.NewConnChan.Receive(sCtx); err != context.DeadlineExceeded { + if u, err := fs1.NewConnChan.Receive(sCtx); err != context.DeadlineExceeded { t.Errorf("unexpected NewConn: %v, %v, want channel recv timeout", u, err) } - fs2, sCleanup2, err := fakeserver.StartServer() + fs2, sCleanup2, err := fakeserver.StartServer(nil) if err != nil { t.Fatalf("failed to start fake xDS server: %v", err) } defer sCleanup2() // Report to a different address should create new ClientConn. - store2, lrsCancel2 := xdsC.ReportLoad(fs2.Address) + serverCgf2 := xdstestutils.ServerConfigForAddress(t, fs2.Address) + store2, lrsCancel2 := xdsC.ReportLoad(serverCgf2) defer lrsCancel2() if u, err := fs2.NewConnChan.Receive(ctx); err != nil { t.Errorf("unexpected timeout: %v, %v, want NewConn", u, err) @@ -103,7 +99,7 @@ func (s) TestLRSClient(t *testing.T) { // Send one resp to the client. 
fs2.LRSResponseChan <- &fakeserver.Response{ - Resp: &lrspb.LoadStatsResponse{ + Resp: &v3lrspb.LoadStatsResponse{ SendAllClusters: true, LoadReportingInterval: &durationpb.Duration{Nanos: 50000000}, }, @@ -114,16 +110,16 @@ func (s) TestLRSClient(t *testing.T) { if err != nil { t.Fatalf("unexpected LRS request: %v, %v, want error canceled", u, err) } - receivedLoad := u.(*fakeserver.Request).Req.(*lrspb.LoadStatsRequest).ClusterStats + receivedLoad := u.(*fakeserver.Request).Req.(*v3lrspb.LoadStatsRequest).ClusterStats if len(receivedLoad) <= 0 { t.Fatalf("unexpected load received, want load for cluster, eds, dropped for test") } receivedLoad[0].LoadReportInterval = nil - want := &endpointpb.ClusterStats{ + want := &v3endpointpb.ClusterStats{ ClusterName: "cluster", ClusterServiceName: "eds", TotalDroppedRequests: 1, - DroppedRequests: []*endpointpb.ClusterStats_DroppedRequests{{Category: "test", DroppedCount: 1}}, + DroppedRequests: []*v3endpointpb.ClusterStats_DroppedRequests{{Category: "test", DroppedCount: 1}}, } if d := cmp.Diff(want, receivedLoad[0], protocmp.Transform()); d != "" { t.Fatalf("unexpected load received, want load for cluster, eds, dropped for test, diff (-want +got):\n%s", d) diff --git a/xds/internal/xdsclient/logging.go b/xds/internal/xdsclient/logging.go new file mode 100644 index 000000000000..2269cb293da9 --- /dev/null +++ b/xds/internal/xdsclient/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package xdsclient

import (
	"fmt"

	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

// logger is the component logger shared by all xDS client instances.
var logger = grpclog.Component("xds")

// prefixLogger returns a logger whose lines are prefixed with an identifier
// for the given client instance.
func prefixLogger(p *clientImpl) *internalgrpclog.PrefixLogger {
	return internalgrpclog.NewPrefixLogger(logger, clientPrefix(p))
}

// clientPrefix returns the log prefix for the given client. The client's
// pointer value serves as a unique per-instance identifier.
func clientPrefix(p *clientImpl) string {
	return fmt.Sprintf("[xds-client %p] ", p)
}

// authorityPrefix returns the log prefix for an authority created by the given
// client for the server identified by serverURI.
func authorityPrefix(p *clientImpl, serverURI string) string {
	return fmt.Sprintf("%s[%s] ", clientPrefix(p), serverURI)
}

/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
// clusterNameAndServiceName is the combination of a cluster name and EDS
// service name, used as the map key for a cluster's requests counter.
// (Fix: the field was previously misspelled "edsServcieName".)
type clusterNameAndServiceName struct {
	clusterName, edsServiceName string
}

// clusterRequestsCounter holds the per-(cluster, EDS service) request
// counters.
type clusterRequestsCounter struct {
	mu       sync.Mutex
	clusters map[clusterNameAndServiceName]*ClusterRequestsCounter
}

var src = &clusterRequestsCounter{
	clusters: make(map[clusterNameAndServiceName]*ClusterRequestsCounter),
}

// ClusterRequestsCounter is used to track the total inflight requests for a
// service with the provided name.
type ClusterRequestsCounter struct {
	ClusterName    string
	EDSServiceName string
	numRequests    uint32
}

// GetClusterRequestsCounter returns the ClusterRequestsCounter with the
// provided serviceName. If one does not exist, it creates it.
func GetClusterRequestsCounter(clusterName, edsServiceName string) *ClusterRequestsCounter {
	src.mu.Lock()
	defer src.mu.Unlock()
	k := clusterNameAndServiceName{
		clusterName:    clusterName,
		edsServiceName: edsServiceName,
	}
	c, ok := src.clusters[k]
	if !ok {
		// Fix: populate both identifying fields; the original left
		// EDSServiceName unset on newly created counters.
		c = &ClusterRequestsCounter{ClusterName: clusterName, EDSServiceName: edsServiceName}
		src.clusters[k] = c
	}
	return c
}

// StartRequest starts a request for a cluster, incrementing its number of
// requests by 1. Returns an error if the max number of requests is exceeded.
func (c *ClusterRequestsCounter) StartRequest(max uint32) error {
	// Note that during race, the limits could be exceeded. This is allowed:
	// "Since the implementation is eventually consistent, races between threads
	// may allow limits to be potentially exceeded."
	// https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/circuit_breaking#arch-overview-circuit-break.
	if atomic.LoadUint32(&c.numRequests) >= max {
		return fmt.Errorf("max requests %v exceeded on service %v", max, c.ClusterName)
	}
	atomic.AddUint32(&c.numRequests, 1)
	return nil
}

// EndRequest ends a request for a service, decrementing its number of requests
// by 1.
func (c *ClusterRequestsCounter) EndRequest() {
	// Adding ^uint32(0) (i.e. MaxUint32) wraps around, decrementing by one.
	atomic.AddUint32(&c.numRequests, ^uint32(0))
}

// ClearCounterForTesting clears the counter for the service. Should be only
// used in tests.
func ClearCounterForTesting(clusterName, edsServiceName string) {
	src.mu.Lock()
	defer src.mu.Unlock()
	k := clusterNameAndServiceName{
		clusterName:    clusterName,
		edsServiceName: edsServiceName,
	}
	c, ok := src.clusters[k]
	if !ok {
		return
	}
	c.numRequests = 0
}

// ClearAllCountersForTesting clears all the counters. Should be only used in
// tests.
func ClearAllCountersForTesting() {
	src.mu.Lock()
	defer src.mu.Unlock()
	src.clusters = make(map[clusterNameAndServiceName]*ClusterRequestsCounter)
}
err == nil { @@ -91,13 +93,17 @@ func testCounter(t *testing.T, test counterTest) { if test.expectedErrors == 0 && loadedError != nil { t.Errorf("error starting request: %v", loadedError.(error)) } - if successes != test.expectedSuccesses || errors != test.expectedErrors { + // We allow the limits to be exceeded during races. + // + // But we should never over-limit, so this test fails if there are less + // successes than expected. + if successes < test.expectedSuccesses || errors > test.expectedErrors { t.Errorf("unexpected number of (successes, errors), expected (%v, %v), encountered (%v, %v)", test.expectedSuccesses, test.expectedErrors, successes, errors) } } func (s) TestRequestsCounter(t *testing.T) { - defer resetServiceRequestsCounter() + defer resetClusterRequestsCounter() for _, test := range tests { t.Run(test.name, func(t *testing.T) { testCounter(t, test) @@ -105,18 +111,18 @@ func (s) TestRequestsCounter(t *testing.T) { } } -func (s) TestGetServiceRequestsCounter(t *testing.T) { - defer resetServiceRequestsCounter() +func (s) TestGetClusterRequestsCounter(t *testing.T) { + defer resetClusterRequestsCounter() for _, test := range tests { - counterA := GetServiceRequestsCounter(test.name) - counterB := GetServiceRequestsCounter(test.name) + counterA := GetClusterRequestsCounter(test.name, testService) + counterB := GetClusterRequestsCounter(test.name, testService) if counterA != counterB { t.Errorf("counter %v %v != counter %v %v", counterA, *counterA, counterB, *counterB) } } } -func startRequests(t *testing.T, n uint32, max uint32, counter *ServiceRequestsCounter) { +func startRequests(t *testing.T, n uint32, max uint32, counter *ClusterRequestsCounter) { for i := uint32(0); i < n; i++ { if err := counter.StartRequest(max); err != nil { t.Fatalf("error starting initial request: %v", err) @@ -125,11 +131,11 @@ func startRequests(t *testing.T, n uint32, max uint32, counter *ServiceRequestsC } func (s) TestSetMaxRequestsIncreased(t *testing.T) { - defer 
resetServiceRequestsCounter() - const serviceName string = "set-max-requests-increased" + defer resetClusterRequestsCounter() + const clusterName string = "set-max-requests-increased" var initialMax uint32 = 16 - counter := GetServiceRequestsCounter(serviceName) + counter := GetClusterRequestsCounter(clusterName, testService) startRequests(t, initialMax, initialMax, counter) if err := counter.StartRequest(initialMax); err == nil { t.Fatal("unexpected success on start request after max met") @@ -142,11 +148,11 @@ func (s) TestSetMaxRequestsIncreased(t *testing.T) { } func (s) TestSetMaxRequestsDecreased(t *testing.T) { - defer resetServiceRequestsCounter() - const serviceName string = "set-max-requests-decreased" + defer resetClusterRequestsCounter() + const clusterName string = "set-max-requests-decreased" var initialMax uint32 = 16 - counter := GetServiceRequestsCounter(serviceName) + counter := GetClusterRequestsCounter(clusterName, testService) startRequests(t, initialMax-1, initialMax, counter) newMax := initialMax - 1 diff --git a/xds/internal/xdsclient/singleton.go b/xds/internal/xdsclient/singleton.go new file mode 100644 index 000000000000..96db8ef51387 --- /dev/null +++ b/xds/internal/xdsclient/singleton.go @@ -0,0 +1,115 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsclient + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" +) + +const ( + defaultWatchExpiryTimeout = 15 * time.Second + defaultIdleAuthorityDeleteTimeout = 5 * time.Minute +) + +var ( + // This is the client returned by New(). It contains one client implementation, + // and maintains the refcount. + singletonMu sync.Mutex + singletonClient *clientRefCounted + + // The following functions are no-ops in the actual code, but can be + // overridden in tests to give them visibility into certain events. + singletonClientImplCreateHook = func() {} + singletonClientImplCloseHook = func() {} +) + +// To override in tests. +var bootstrapNewConfig = bootstrap.NewConfig + +func clientRefCountedClose() { + singletonMu.Lock() + defer singletonMu.Unlock() + + if singletonClient.decrRef() != 0 { + return + } + singletonClient.clientImpl.close() + singletonClientImplCloseHook() + singletonClient = nil +} + +func newRefCountedWithConfig(fallbackConfig *bootstrap.Config) (XDSClient, func(), error) { + singletonMu.Lock() + defer singletonMu.Unlock() + + if singletonClient != nil { + singletonClient.incrRef() + return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil + + } + + // Use fallbackConfig only if bootstrap env vars are unspecified. + var config *bootstrap.Config + if envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" { + if fallbackConfig == nil { + return nil, nil, fmt.Errorf("xds: bootstrap env vars are unspecified and provided fallback config is nil") + } + config = fallbackConfig + } else { + var err error + config, err = bootstrapNewConfig() + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) + } + } + + // Create the new client implementation. 
+ c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) + if err != nil { + return nil, nil, err + } + singletonClient = &clientRefCounted{clientImpl: c, refCount: 1} + singletonClientImplCreateHook() + + logger.Infof("xDS node ID: %s", config.NodeProto.GetId()) + return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil +} + +// clientRefCounted is ref-counted, and to be shared by the xds resolver and +// balancer implementations, across multiple ClientConns and Servers. +type clientRefCounted struct { + *clientImpl + + refCount int32 // accessed atomically +} + +func (c *clientRefCounted) incrRef() int32 { + return atomic.AddInt32(&c.refCount, 1) +} + +func (c *clientRefCounted) decrRef() int32 { + return atomic.AddInt32(&c.refCount, -1) +} diff --git a/xds/internal/xdsclient/singleton_test.go b/xds/internal/xdsclient/singleton_test.go new file mode 100644 index 000000000000..1875ea118d09 --- /dev/null +++ b/xds/internal/xdsclient/singleton_test.go @@ -0,0 +1,124 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "context" + "testing" + + "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" +) + +// Test that multiple New() returns the same Client. And only when the last +// client is closed, the underlying client is closed. 
+func (s) TestClientNewSingleton(t *testing.T) { + // Create a bootstrap configuration, place it in a file in the temp + // directory, and set the bootstrap env vars to point to it. + nodeID := uuid.New().String() + cleanup, err := bootstrap.CreateFile(bootstrap.Options{ + NodeID: nodeID, + ServerURI: "non-existent-server-address", + }) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + // Override the singleton creation hook to get notified. + origSingletonClientImplCreateHook := singletonClientImplCreateHook + singletonCreationCh := testutils.NewChannel() + singletonClientImplCreateHook = func() { + singletonCreationCh.Replace(nil) + } + defer func() { singletonClientImplCreateHook = origSingletonClientImplCreateHook }() + + // Override the singleton close hook to get notified. + origSingletonClientImplCloseHook := singletonClientImplCloseHook + singletonCloseCh := testutils.NewChannel() + singletonClientImplCloseHook = func() { + singletonCloseCh.Replace(nil) + } + defer func() { singletonClientImplCloseHook = origSingletonClientImplCloseHook }() + + // The first call to New() should create a new singleton client. + _, closeFunc, err := New() + if err != nil { + t.Fatalf("failed to create xDS client: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := singletonCreationCh.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for singleton xDS client to be created: %v", err) + } + + // Calling New() again should not create new singleton client implementations. 
+ const count = 9 + closeFuncs := make([]func(), count) + for i := 0; i < count; i++ { + func() { + _, closeFuncs[i], err = New() + if err != nil { + t.Fatalf("%d-th call to New() failed with error: %v", i, err) + } + + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := singletonCreationCh.Receive(sCtx); err == nil { + t.Fatalf("%d-th call to New() created a new singleton client", i) + } + }() + } + + // Call Close() multiple times on each of the clients created in the above for + // loop. Close() calls are idempotent, and the underlying client + // implementation will not be closed until we release the first reference we + // acquired above, via the first call to New(). + for i := 0; i < count; i++ { + func() { + closeFuncs[i]() + closeFuncs[i]() + + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := singletonCloseCh.Receive(sCtx); err == nil { + t.Fatal("singleton client implementation closed before all references are released") + } + }() + } + + // Call the last Close(). The underlying implementation should be closed. + closeFunc() + if _, err := singletonCloseCh.Receive(ctx); err != nil { + t.Fatalf("Timeout waiting for singleton client implementation to be closed: %v", err) + } + + // Calling New() again, after the previous Client was actually closed, should + // create a new one. + _, closeFunc, err = New() + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + defer closeFunc() + if _, err := singletonCreationCh.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for singleton xDS client to be created: %v", err) + } +} diff --git a/xds/internal/xdsclient/tests/authority_test.go b/xds/internal/xdsclient/tests/authority_test.go new file mode 100644 index 000000000000..0345f4a4040c --- /dev/null +++ b/xds/internal/xdsclient/tests/authority_test.go @@ -0,0 +1,303 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient_test + +import ( + "context" + "testing" + "time" + + "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +const ( + testAuthority1 = "test-authority1" + testAuthority2 = "test-authority2" + testAuthority3 = "test-authority3" +) + +var ( + // These two resources use `testAuthority1`, which contains an empty server + // config in the bootstrap file, and therefore will use the default + // management server. + authorityTestResourceName11 = xdstestutils.BuildResourceName(xdsresource.ClusterResourceTypeName, testAuthority1, cdsName+"1", nil) + authorityTestResourceName12 = xdstestutils.BuildResourceName(xdsresource.ClusterResourceTypeName, testAuthority1, cdsName+"2", nil) + // This resource uses `testAuthority2`, which contains an empty server + // config in the bootstrap file, and therefore will use the default + // management server. 
+ authorityTestResourceName2 = xdstestutils.BuildResourceName(xdsresource.ClusterResourceTypeName, testAuthority2, cdsName+"3", nil) + // This resource uses `testAuthority3`, which contains a non-empty server + // config in the bootstrap file, and therefore will use the non-default + // management server. + authorityTestResourceName3 = xdstestutils.BuildResourceName(xdsresource.ClusterResourceTypeName, testAuthority3, cdsName+"3", nil) +) + +// setupForAuthorityTests spins up two management servers, one to act as the +// default and the other to act as the non-default. It also generates a +// bootstrap configuration with three authorities (the first two pointing to the +// default and the third one pointing to the non-default). +// +// Returns two listeners used by the default and non-default management servers +// respectively, and the xDS client and its close function. +func setupForAuthorityTests(ctx context.Context, t *testing.T, idleTimeout time.Duration) (*testutils.ListenerWrapper, *testutils.ListenerWrapper, xdsclient.XDSClient, func()) { + overrideFedEnvVar(t) + + // Create listener wrappers which notify on to a channel whenever a new + // connection is accepted. We use this to track the number of transports + // used by the xDS client. + lisDefault := testutils.NewListenerWrapper(t, nil) + lisNonDefault := testutils.NewListenerWrapper(t, nil) + + // Start a management server to act as the default authority. + defaultAuthorityServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{Listener: lisDefault}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + t.Cleanup(func() { defaultAuthorityServer.Stop() }) + + // Start a management server to act as the non-default authority. 
+ nonDefaultAuthorityServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{Listener: lisNonDefault}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + t.Cleanup(func() { nonDefaultAuthorityServer.Stop() }) + + // Create a bootstrap configuration with two non-default authorities which + // have empty server configs, and therefore end up using the default server + // config, which points to the above management server. + nodeID := uuid.New().String() + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, defaultAuthorityServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + Authorities: map[string]*bootstrap.Authority{ + testAuthority1: {}, + testAuthority2: {}, + testAuthority3: {XDSServer: xdstestutils.ServerConfigForAddress(t, nonDefaultAuthorityServer.Address)}, + }, + }, defaultTestWatchExpiryTimeout, idleTimeout) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + e2e.DefaultCluster(authorityTestResourceName11, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(authorityTestResourceName12, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(authorityTestResourceName2, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(authorityTestResourceName3, edsName, e2e.SecurityLevelNone), + }, + SkipValidation: true, + } + if err := defaultAuthorityServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + return lisDefault, lisNonDefault, client, close +} + +// TestAuthorityShare tests the authority sharing logic. The test verifies the +// following scenarios: +// - A watch for a resource name with an authority matching an existing watch +// should not result in a new transport being created. 
+// - A watch for a resource name with different authority name but same +// authority config as an existing watch should not result in a new transport +// being created. +func (s) TestAuthorityShare(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + lis, _, client, close := setupForAuthorityTests(ctx, t, time.Duration(0)) + defer close() + + // Verify that no connection is established to the management server at this + // point. A transport is created only when a resource (which belongs to that + // authority) is requested. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := lis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Unexpected new transport created to management server") + } + + // Request the first resource. Verify that a new transport is created. + cdsCancel1 := client.WatchCluster(authorityTestResourceName11, func(u xdsresource.ClusterUpdate, err error) {}) + defer cdsCancel1() + if _, err := lis.NewConnCh.Receive(ctx); err != nil { + t.Fatalf("Timed out when waiting for a new transport to be created to the management server: %v", err) + } + + // Request the second resource. Verify that no new transport is created. + cdsCancel2 := client.WatchCluster(authorityTestResourceName12, func(u xdsresource.ClusterUpdate, err error) {}) + defer cdsCancel2() + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := lis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Unexpected new transport created to management server") + } + + // Request the third resource. Verify that no new transport is created. 
+ cdsCancel3 := client.WatchCluster(authorityTestResourceName2, func(u xdsresource.ClusterUpdate, err error) {}) + defer cdsCancel3() + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := lis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Unexpected new transport created to management server") + } +} + +// TestAuthorityIdleTimeout tests the authority idle timeout logic. The test verifies +// that the xDS client does not close authorities immediately after the last +// watch is canceled, but waits for the configured idle timeout to expire before +// closing them. +func (s) TestAuthorityIdleTimeout(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + lis, _, client, close := setupForAuthorityTests(ctx, t, defaultTestIdleAuthorityTimeout) + defer close() + + // Request the first resource. Verify that a new transport is created. + cdsCancel1 := client.WatchCluster(authorityTestResourceName11, func(u xdsresource.ClusterUpdate, err error) {}) + val, err := lis.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Timed out when waiting for a new transport to be created to the management server: %v", err) + } + conn := val.(*testutils.ConnWrapper) + + // Request the second resource. Verify that no new transport is created. + cdsCancel2 := client.WatchCluster(authorityTestResourceName12, func(u xdsresource.ClusterUpdate, err error) {}) + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := lis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Unexpected new transport created to management server") + } + + // Cancel both watches, and verify that the connection to the management + // server is not closed immediately. 
+ cdsCancel1() + cdsCancel2() + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := conn.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Connection to management server closed unexpectedly") + } + + // Wait for the authority idle timeout to fire. + time.Sleep(2 * defaultTestIdleAuthorityTimeout) + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := conn.CloseCh.Receive(sCtx); err != nil { + t.Fatal("Connection to management server not closed after idle timeout expiry") + } +} + +// TestAuthorityClientClose verifies that authorities in use and in the idle +// cache are all closed when the client is closed. +func (s) TestAuthorityClientClose(t *testing.T) { + // Set the authority idle timeout to twice the defaultTestTimeout. This will + // ensure that idle authorities stay in the cache for the duration of this + // test, until explicitly closed. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + lisDefault, lisNonDefault, client, close := setupForAuthorityTests(ctx, t, time.Duration(2*defaultTestTimeout)) + + // Request the first resource. Verify that a new transport is created to the + // default management server. + cdsCancel1 := client.WatchCluster(authorityTestResourceName11, func(u xdsresource.ClusterUpdate, err error) {}) + val, err := lisDefault.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Timed out when waiting for a new transport to be created to the management server: %v", err) + } + connDefault := val.(*testutils.ConnWrapper) + + // Request another resource which is served by the non-default authority. + // Verify that a new transport is created to the non-default management + // server. 
+ client.WatchCluster(authorityTestResourceName3, func(u xdsresource.ClusterUpdate, err error) {}) + val, err = lisNonDefault.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Timed out when waiting for a new transport to be created to the management server: %v", err) + } + connNonDefault := val.(*testutils.ConnWrapper) + + // Cancel the first watch. This should move the default authority to the + // idle cache, but the connection should not be closed yet, because the idle + // timeout would not have fired. + cdsCancel1() + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := connDefault.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Connection to management server closed unexpectedly") + } + + // Closing the xDS client should close the connection to both management + // servers, even though we have an open watch to one of them. + close() + if _, err := connDefault.CloseCh.Receive(ctx); err != nil { + t.Fatal("Connection to management server not closed after client close") + } + if _, err := connNonDefault.CloseCh.Receive(ctx); err != nil { + t.Fatal("Connection to management server not closed after client close") + } +} + +// TestAuthorityRevive verifies that an authority in the idle cache is revived +// when a new watch is started on this authority. +func (s) TestAuthorityRevive(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + lis, _, client, close := setupForAuthorityTests(ctx, t, defaultTestIdleAuthorityTimeout) + defer close() + + // Request the first resource. Verify that a new transport is created. 
+ cdsCancel1 := client.WatchCluster(authorityTestResourceName11, func(u xdsresource.ClusterUpdate, err error) {}) + val, err := lis.NewConnCh.Receive(ctx) + if err != nil { + t.Fatalf("Timed out when waiting for a new transport to be created to the management server: %v", err) + } + conn := val.(*testutils.ConnWrapper) + + // Cancel the above watch. This should move the authority to the idle cache. + cdsCancel1() + + // Request the second resource. Verify that no new transport is created. + // This should move the authority out of the idle cache. + cdsCancel2 := client.WatchCluster(authorityTestResourceName12, func(u xdsresource.ClusterUpdate, err error) {}) + defer cdsCancel2() + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := lis.NewConnCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Unexpected new transport created to management server") + } + + // Wait for double the idle timeout, and the connection to the management + // server should not be closed, since it was revived from the idle cache. + time.Sleep(2 * defaultTestIdleAuthorityTimeout) + sCtx, sCancel = context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if _, err := conn.CloseCh.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("Connection to management server closed unexpectedly") + } +} diff --git a/xds/internal/xdsclient/tests/cds_watchers_test.go b/xds/internal/xdsclient/tests/cds_watchers_test.go new file mode 100644 index 000000000000..9670caaca0a6 --- /dev/null +++ b/xds/internal/xdsclient/tests/cds_watchers_test.go @@ -0,0 +1,956 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +// badClusterResource returns a cluster resource for the given name which +// contains a config_source_specifier for the `lrs_server` field which is not +// set to `self`, and hence is expected to be NACKed by the client. 
+func badClusterResource(clusterName, edsServiceName string, secLevel e2e.SecurityLevel) *v3clusterpb.Cluster { + cluster := e2e.DefaultCluster(clusterName, edsServiceName, secLevel) + cluster.LrsServer = &v3corepb.ConfigSource{ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{}} + return cluster +} + +// xdsClient is expected to produce an error containing this string when an +// update is received containing a cluster created using `badClusterResource`. +const wantClusterNACKErr = "unsupported config_source_specifier" + +// verifyClusterUpdate waits for an update to be received on the provided update +// channel and verifies that it matches the expected update. +// +// Returns an error if no update is received before the context deadline expires +// or the received update does not match the expected one. +func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ClusterUpdateErrTuple) error { + u, err := updateCh.Receive(ctx) + if err != nil { + return fmt.Errorf("timeout when waiting for a cluster resource from the management server: %v", err) + } + got := u.(xdsresource.ClusterUpdateErrTuple) + if wantUpdate.Err != nil { + if gotType, wantType := xdsresource.ErrType(got.Err), xdsresource.ErrType(wantUpdate.Err); gotType != wantType { + return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) + } + } + cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicy")} + if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" { + return fmt.Errorf("received unexpected diff in the cluster resource update: (-want, got):\n%s", diff) + } + return nil +} + +// verifyNoClusterUpdate verifies that no cluster update is received on the +// provided update channel, and returns an error if an update is received. 
+// +// A very short deadline is used while waiting for the update, as this function +// is intended to be used when an update is not expected. +func verifyNoClusterUpdate(ctx context.Context, updateCh *testutils.Channel) error { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + return fmt.Errorf("received unexpected ClusterUpdate when expecting none: %v", u) + } + return nil +} + +// TestCDSWatch covers the case where a single watcher exists for a single +// cluster resource. The test verifies the following scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of the watch callback. +// 2. An update from the management server containing a resource *not* being +// watched should not result in the invocation of the watch callback. +// 3. After the watch is cancelled, an update from the management server +// containing the resource that was being watched should not result in the +// invocation of the watch callback. +// +// The test is run for old and new style names. +func (s) TestCDSWatch(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3clusterpb.Cluster // The resource being watched. + updatedWatchedResource *v3clusterpb.Cluster // The watched resource after an update. + notWatchedResource *v3clusterpb.Cluster // A resource which is not being watched. 
+ wantUpdate xdsresource.ClusterUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: cdsName, + watchedResource: e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone), + updatedWatchedResource: e2e.DefaultCluster(cdsName, "new-eds-resource", e2e.SecurityLevelNone), + notWatchedResource: e2e.DefaultCluster("unsubscribed-cds-resource", edsName, e2e.SecurityLevelNone), + wantUpdate: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: edsName, + }, + }, + }, + { + desc: "new style resource", + resourceName: cdsNameNewStyle, + watchedResource: e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone), + updatedWatchedResource: e2e.DefaultCluster(cdsNameNewStyle, "new-eds-resource", e2e.SecurityLevelNone), + notWatchedResource: e2e.DefaultCluster("unsubscribed-cds-resource", edsNameNewStyle, e2e.SecurityLevelNone), + wantUpdate: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsNameNewStyle, + EDSServiceName: edsNameNewStyle, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for a cluster resource and have the watch + // callback push the received update on to a channel. 
+ updateCh := testutils.NewChannel() + cdsCancel := client.WatchCluster(test.resourceName, func(u xdsresource.ClusterUpdate, err error) { + updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single cluster + // resource, corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyClusterUpdate(ctx, updateCh, test.wantUpdate); err != nil { + t.Fatal(err) + } + + // Configure the management server to return an additional cluster + // resource, one that we are not interested in. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{test.watchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoClusterUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + + // Cancel the watch and update the resource corresponding to the original + // watch. Ensure that the cancelled watch callback is not invoked. 
+ cdsCancel() + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{test.updatedWatchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoClusterUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestCDSWatch_TwoWatchesForSameResourceName covers the case where two watchers +// exist for a single cluster resource. The test verifies the following +// scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of both watch callbacks. +// 2. After one of the watches is cancelled, a redundant update from the +// management server should not result in the invocation of either of the +// watch callbacks. +// 3. A new update from the management server containing the resource being +// watched should result in the invocation of the un-cancelled watch +// callback. +// +// The test is run for old and new style names. +func (s) TestCDSWatch_TwoWatchesForSameResourceName(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3clusterpb.Cluster // The resource being watched. + updatedWatchedResource *v3clusterpb.Cluster // The watched resource after an update. 
+ wantUpdateV1 xdsresource.ClusterUpdateErrTuple + wantUpdateV2 xdsresource.ClusterUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: cdsName, + watchedResource: e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone), + updatedWatchedResource: e2e.DefaultCluster(cdsName, "new-eds-resource", e2e.SecurityLevelNone), + wantUpdateV1: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: edsName, + }, + }, + wantUpdateV2: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: "new-eds-resource", + }, + }, + }, + { + desc: "new style resource", + resourceName: cdsNameNewStyle, + watchedResource: e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone), + updatedWatchedResource: e2e.DefaultCluster(cdsNameNewStyle, "new-eds-resource", e2e.SecurityLevelNone), + wantUpdateV1: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsNameNewStyle, + EDSServiceName: edsNameNewStyle, + }, + }, + wantUpdateV2: xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsNameNewStyle, + EDSServiceName: "new-eds-resource", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for the same cluster resource and have the + // callbacks push the received updates on to a channel. 
+ updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(test.resourceName, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(test.resourceName, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single cluster + // resource, corresponding to the one we registered watches for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyClusterUpdate(ctx, updateCh1, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + if err := verifyClusterUpdate(ctx, updateCh2, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + + // Cancel the second watch and force the management server to push a + // redundant update for the resource being watched. Neither of the + // two watch callbacks should be invoked. + cdsCancel2() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoClusterUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + if err := verifyNoClusterUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update to the resource being watched. The un-cancelled callback + // should be invoked while the cancelled one should not be. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{test.updatedWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyClusterUpdate(ctx, updateCh1, test.wantUpdateV2); err != nil { + t.Fatal(err) + } + if err := verifyNoClusterUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestCDSWatch_ThreeWatchesForDifferentResourceNames covers the case where +// three watchers (two watchers for one resource, and the third watcher for +// another resource) exist across two cluster resources (one with an old style +// name and one with a new style name). The test verifies that an update from +// the management server containing both resources results in the invocation of +// all watch callbacks. +func (s) TestCDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for the same cluster resource and have the + // callbacks push the received updates on to a channel. 
+ updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + + // Register the third watch for a different cluster resource, and push the + // received updates onto a channel. + updateCh3 := testutils.NewChannel() + cdsCancel3 := client.WatchCluster(cdsNameNewStyle, func(u xdsresource.ClusterUpdate, err error) { + updateCh3.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel3() + + // Configure the management server to return two cluster resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the all watchers. 
+ wantUpdate12 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: edsName, + }, + } + wantUpdate3 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsNameNewStyle, + EDSServiceName: edsNameNewStyle, + }, + } + if err := verifyClusterUpdate(ctx, updateCh1, wantUpdate12); err != nil { + t.Fatal(err) + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate12); err != nil { + t.Fatal(err) + } + if err := verifyClusterUpdate(ctx, updateCh3, wantUpdate3); err != nil { + t.Fatal(err) + } +} + +// TestCDSWatch_ResourceCaching covers the case where a watch is registered for +// a resource which is already present in the cache. The test verifies that the +// watch callback is invoked with the contents from the cache, instead of a +// request being sent to the management server. +func (s) TestCDSWatch_ResourceCaching(t *testing.T) { + overrideFedEnvVar(t) + firstRequestReceived := false + firstAckReceived := grpcsync.NewEvent() + secondRequestReceived := grpcsync.NewEvent() + + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + // The first request has an empty version string. + if !firstRequestReceived && req.GetVersionInfo() == "" { + firstRequestReceived = true + return nil + } + // The first ack has a non-empty version string. + if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" { + firstAckReceived.Fire() + return nil + } + // Any requests after the first request and ack, are not expected. + secondRequestReceived.Fire() + return nil + }, + }) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for a cluster resource and have the watch + // callback push the received update on to a channel. + updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + + // Configure the management server to return a single cluster + // resource, corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: cdsName, + EDSServiceName: edsName, + }, + } + if err := verifyClusterUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for receipt of ACK at the management server") + case <-firstAckReceived.Done(): + } + + // Register another watch for the same resource. This should get the update + // from the cache. 
+ updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + // No request should get sent out as part of this watch. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case <-secondRequestReceived.Done(): + t.Fatal("xdsClient sent out request instead of using update from cache") + } +} + +// TestCDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client +// does not receive an CDS response for the request that it sends. The test +// verifies that the watch callback is invoked with an error once the +// watchExpiryTimer fires. +func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Register a watch for a resource which is expected to be invoked with an + // error after the watch expiry timer fires. + updateCh := testutils.NewChannel() + cdsCancel := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel() + + // Wait for the watch expiry timer to fire. 
+	<-time.After(defaultTestWatchExpiryTimeout)
+
+	// Verify that an empty update with the expected error is received.
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "")
+	if err := verifyClusterUpdate(ctx, updateCh, xdsresource.ClusterUpdateErrTuple{Err: wantErr}); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the
+// client receives a valid CDS response for the request that it sends. The test
+// verifies that the behavior associated with the expiry timer (i.e., callback
+// invocation with error) does not take place.
+func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
+	overrideFedEnvVar(t)
+	mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
+	if err != nil {
+		t.Fatalf("Failed to spin up the xDS management server: %v", err)
+	}
+	defer mgmtServer.Stop()
+
+	// Create an xDS client talking to the above management server.
+	nodeID := uuid.New().String()
+	client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{
+		XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address),
+		NodeProto: &v3corepb.Node{Id: nodeID},
+	}, defaultTestWatchExpiryTimeout, time.Duration(0))
+	if err != nil {
+		t.Fatalf("failed to create xds client: %v", err)
+	}
+	defer close()
+
+	// Register a watch for a cluster resource and have the watch
+	// callback push the received update on to a channel.
+	updateCh := testutils.NewChannel()
+	cdsCancel := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) {
+		updateCh.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err})
+	})
+	defer cdsCancel()
+
+	// Configure the management server to return a single cluster resource,
+	// corresponding to the one we registered a watch for.
+	resources := e2e.UpdateOptions{
+		NodeID:         nodeID,
+		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone)},
+		SkipValidation: true,
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	if err := mgmtServer.Update(ctx, resources); err != nil {
+		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
+	}
+
+	// Verify the contents of the received update.
+	wantUpdate := xdsresource.ClusterUpdateErrTuple{
+		Update: xdsresource.ClusterUpdate{
+			ClusterName:    cdsName,
+			EDSServiceName: edsName,
+		},
+	}
+	if err := verifyClusterUpdate(ctx, updateCh, wantUpdate); err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for the watch expiry timer to fire, and verify that the callback is
+	// not invoked.
+	<-time.After(defaultTestWatchExpiryTimeout)
+	if err := verifyNoClusterUpdate(ctx, updateCh); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestCDSWatch_ResourceRemoved covers the cases where two watchers exist for
+// two different resources (one with an old style name and one with a new style
+// name). One of these resources being watched is removed from the management
+// server. The test verifies the following scenarios:
+// 1. Removing a resource should trigger the watch callback associated with that
+//    resource with a resource removed error. It should not trigger the watch
+//    callback for an unrelated resource.
+// 2. An update to the other resource should result in the invocation of the watch
+//    callback associated with that resource. It should not result in the
+//    invocation of the watch callback associated with the deleted resource.
+func (s) TestCDSWatch_ResourceRemoved(t *testing.T) {
+	overrideFedEnvVar(t)
+	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
+	defer cleanup()
+
+	// Create an xDS client with the above bootstrap contents.
+ client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for two cluster resources and have the + // callbacks push the received updates on to a channel. + resourceName1 := cdsName + updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(resourceName1, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + resourceName2 := cdsNameNewStyle + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(resourceName2, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + + // Configure the management server to return two cluster resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for both watchers. 
+ wantUpdate1 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: resourceName1, + EDSServiceName: edsName, + }, + } + wantUpdate2 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: resourceName2, + EDSServiceName: edsNameNewStyle, + }, + } + if err := verifyClusterUpdate(ctx, updateCh1, wantUpdate1); err != nil { + t.Fatal(err) + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate2); err != nil { + t.Fatal(err) + } + + // Remove the first cluster resource on the management server. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // The first watcher should receive a resource removed error, while the + // second watcher should not receive an update. + if err := verifyClusterUpdate(ctx, updateCh1, xdsresource.ClusterUpdateErrTuple{Err: xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "")}); err != nil { + t.Fatal(err) + } + if err := verifyNoClusterUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update the second cluster resource on the management server. The first + // watcher should not receive an update, while the second watcher should. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName2, "new-eds-resource", e2e.SecurityLevelNone)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoClusterUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + wantUpdate := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: resourceName2, + EDSServiceName: "new-eds-resource", + }, + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestCDSWatch_NACKError covers the case where an update from the management +// server is NACK'ed by the xdsclient. The test verifies that the error is +// propagated to the watcher. +func (s) TestCDSWatch_NACKError(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for a cluster resource and have the watch + // callback push the received update on to a channel. + updateCh := testutils.NewChannel() + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + cdsCancel := client.WatchCluster(cdsName, func(u xdsresource.ClusterUpdate, err error) { + updateCh.SendContext(ctx, xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel() + + // Configure the management server to return a single cluster resource + // which is expected to be NACK'ed by the client. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{badClusterResource(cdsName, edsName, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher. + u, err := updateCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a cluster resource from the management server: %v", err) + } + gotErr := u.(xdsresource.ClusterUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantClusterNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantClusterNACKErr) + } +} + +// TestCDSWatch_PartialValid covers the case where a response from the +// management server contains both valid and invalid resources and is expected +// to be NACK'ed by the xdsclient. The test verifies that watchers corresponding +// to the valid resource receive the update, while watchers corresponding to the +// invalid resource receive an error. +func (s) TestCDSWatch_PartialValid(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for cluster resources. The first watch is expected + // to receive an error because the received resource is NACK'ed. The second + // watch is expected to get a good update. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + badResourceName := cdsName + updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(badResourceName, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.SendContext(ctx, xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + goodResourceName := cdsNameNewStyle + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(goodResourceName, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.SendContext(ctx, xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + + // Configure the management server with two cluster resources. One of these + // is a bad resource causing the update to be NACKed. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + badClusterResource(badResourceName, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(goodResourceName, edsName, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher which is + // watching the bad resource. + u, err := updateCh1.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a cluster resource from the management server: %v", err) + } + gotErr := u.(xdsresource.ClusterUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantClusterNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantClusterNACKErr) + } + + // Verify that the watcher watching the good resource receives a good + // update. 
+ wantUpdate := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: goodResourceName, + EDSServiceName: edsName, + }, + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestCDSWatch_PartialResponse covers the case where a response from the +// management server does not contain all requested resources. CDS responses are +// supposed to contain all requested resources, and the absence of one usually +// indicates that the management server does not know about it. In cases where +// the server has never responded with this resource before, the xDS client is +// expected to wait for the watch timeout to expire before concluding that the +// resource does not exist on the server +func (s) TestCDSWatch_PartialResponse(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for two cluster resources and have the + // callbacks push the received updates on to a channel. 
+ resourceName1 := cdsName + updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(resourceName1, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + resourceName2 := cdsNameNewStyle + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(resourceName2, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + + // Configure the management server to return only one of the two cluster + // resources, corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for first watcher. + wantUpdate1 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: resourceName1, + EDSServiceName: edsName, + }, + } + if err := verifyClusterUpdate(ctx, updateCh1, wantUpdate1); err != nil { + t.Fatal(err) + } + + // Verify that the second watcher does not get an update with an error. + if err := verifyNoClusterUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Configure the management server to return two cluster resources, + // corresponding to the registered watches. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{ + e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone), + e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone), + }, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the second watcher. + wantUpdate2 := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: resourceName2, + EDSServiceName: edsNameNewStyle, + }, + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate2); err != nil { + t.Fatal(err) + } + + // Verify that the first watcher gets no update, as the first resource did + // not change. + if err := verifyNoClusterUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/tests/dump_test.go b/xds/internal/xdsclient/tests/dump_test.go new file mode 100644 index 000000000000..5f2c5e05e4dd --- /dev/null +++ b/xds/internal/xdsclient/tests/dump_test.go @@ -0,0 +1,261 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsclient_test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" +) + +func compareDump(ctx context.Context, client xdsclient.XDSClient, want map[string]map[string]xdsresource.UpdateWithMD) error { + var lastErr error + for { + if err := ctx.Err(); err != nil { + return fmt.Errorf("Timeout when waiting for expected dump: %v", lastErr) + } + cmpOpts := cmp.Options{ + cmpopts.EquateEmpty(), + cmp.Comparer(func(a, b time.Time) bool { return true }), + cmpopts.EquateErrors(), + protocmp.Transform(), + } + diff := cmp.Diff(want, client.DumpResources(), cmpOpts) + if diff == "" { + return nil + } + lastErr = fmt.Errorf("DumpResources() returned unexpected dump, diff (-want +got):\n%s", diff) + time.Sleep(100 * time.Millisecond) + } +} + +type noopEndpointsWatcher struct{} + +func (noopEndpointsWatcher) OnUpdate(update *xdsresource.EndpointsResourceData) {} +func (noopEndpointsWatcher) OnError(err error) {} +func (noopEndpointsWatcher) OnResourceDoesNotExist() {} + +func (s) TestDumpResources(t *testing.T) { + // Initialize the xDS resources to be used in this test. 
+ ldsTargets := []string{"lds.target.good:0000", "lds.target.good:1111"} + rdsTargets := []string{"route-config-0", "route-config-1"} + cdsTargets := []string{"cluster-0", "cluster-1"} + edsTargets := []string{"endpoints-0", "endpoints-1"} + listeners := make([]*v3listenerpb.Listener, len(ldsTargets)) + listenerAnys := make([]*anypb.Any, len(ldsTargets)) + for i := range ldsTargets { + listeners[i] = e2e.DefaultClientListener(ldsTargets[i], rdsTargets[i]) + listenerAnys[i] = testutils.MarshalAny(listeners[i]) + } + routes := make([]*v3routepb.RouteConfiguration, len(rdsTargets)) + routeAnys := make([]*anypb.Any, len(rdsTargets)) + for i := range rdsTargets { + routes[i] = e2e.DefaultRouteConfig(rdsTargets[i], ldsTargets[i], cdsTargets[i]) + routeAnys[i] = testutils.MarshalAny(routes[i]) + } + clusters := make([]*v3clusterpb.Cluster, len(cdsTargets)) + clusterAnys := make([]*anypb.Any, len(cdsTargets)) + for i := range cdsTargets { + clusters[i] = e2e.DefaultCluster(cdsTargets[i], edsTargets[i], e2e.SecurityLevelNone) + clusterAnys[i] = testutils.MarshalAny(clusters[i]) + } + endpoints := make([]*v3endpointpb.ClusterLoadAssignment, len(edsTargets)) + endpointAnys := make([]*anypb.Any, len(edsTargets)) + ips := []string{"0.0.0.0", "1.1.1.1"} + ports := []uint32{123, 456} + for i := range edsTargets { + endpoints[i] = e2e.DefaultEndpoint(edsTargets[i], ips[i], ports[i:i+1]) + endpointAnys[i] = testutils.MarshalAny(endpoints[i]) + } + + // Spin up an xDS management server on a local port. + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Dump resources and expect empty configs. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := compareDump(ctx, client, nil); err != nil { + t.Fatal(err) + } + + // Register watches, dump resources and expect configs in requested state. + for _, target := range ldsTargets { + client.WatchListener(target, func(xdsresource.ListenerUpdate, error) {}) + } + for _, target := range rdsTargets { + client.WatchRouteConfig(target, func(xdsresource.RouteConfigUpdate, error) {}) + } + for _, target := range cdsTargets { + client.WatchCluster(target, func(xdsresource.ClusterUpdate, error) {}) + } + for _, target := range edsTargets { + xdsresource.WatchEndpoints(client, target, noopEndpointsWatcher{}) + } + want := map[string]map[string]xdsresource.UpdateWithMD{ + "type.googleapis.com/envoy.config.listener.v3.Listener": { + ldsTargets[0]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + ldsTargets[1]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + "type.googleapis.com/envoy.config.route.v3.RouteConfiguration": { + rdsTargets[0]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + rdsTargets[1]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + "type.googleapis.com/envoy.config.cluster.v3.Cluster": { + cdsTargets[0]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + cdsTargets[1]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment": { + edsTargets[0]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + edsTargets[1]: {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}}, + }, + } + if err := compareDump(ctx, client, want); err != nil { + t.Fatal(err) + } + + // Configure the resources on the management server. 
+ if err := mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: listeners, + Routes: routes, + Clusters: clusters, + Endpoints: endpoints, + }); err != nil { + t.Fatal(err) + } + + // Dump resources and expect ACK configs. + want = map[string]map[string]xdsresource.UpdateWithMD{ + "type.googleapis.com/envoy.config.listener.v3.Listener": { + ldsTargets[0]: {Raw: listenerAnys[0], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + ldsTargets[1]: {Raw: listenerAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + }, + "type.googleapis.com/envoy.config.route.v3.RouteConfiguration": { + rdsTargets[0]: {Raw: routeAnys[0], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + rdsTargets[1]: {Raw: routeAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + }, + "type.googleapis.com/envoy.config.cluster.v3.Cluster": { + cdsTargets[0]: {Raw: clusterAnys[0], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + cdsTargets[1]: {Raw: clusterAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + }, + "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment": { + edsTargets[0]: {Raw: endpointAnys[0], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + edsTargets[1]: {Raw: endpointAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}}, + }, + } + if err := compareDump(ctx, client, want); err != nil { + t.Fatal(err) + } + + // Update the first resource of each type in the management server to a + // value which is expected to be NACK'ed by the xDS client. 
+ const nackResourceIdx = 0 + listeners[nackResourceIdx].ApiListener = &v3listenerpb.ApiListener{} + routes[nackResourceIdx].VirtualHosts = []*v3routepb.VirtualHost{{Routes: []*v3routepb.Route{{}}}} + clusters[nackResourceIdx].ClusterDiscoveryType = &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC} + endpoints[nackResourceIdx].Endpoints = []*v3endpointpb.LocalityLbEndpoints{{}} + if err := mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: listeners, + Routes: routes, + Clusters: clusters, + Endpoints: endpoints, + SkipValidation: true, + }); err != nil { + t.Fatal(err) + } + + // Verify that the xDS client reports the first resource of each type as + // being in "NACKed" state, and the second resource of each type to be in + // "ACKed" state. The version for the ACKed resource would be "2", while + // that for the NACKed resource would be "1". In the NACKed resource, the + // version which is NACKed is stored in the ErrorState field. + want = map[string]map[string]xdsresource.UpdateWithMD{ + "type.googleapis.com/envoy.config.listener.v3.Listener": { + ldsTargets[0]: { + Raw: listenerAnys[0], + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "1", + ErrState: &xdsresource.UpdateErrorMetadata{Version: "2", Err: cmpopts.AnyError}, + }, + }, + ldsTargets[1]: {Raw: listenerAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "2"}}, + }, + "type.googleapis.com/envoy.config.route.v3.RouteConfiguration": { + rdsTargets[0]: { + Raw: routeAnys[0], + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "1", + ErrState: &xdsresource.UpdateErrorMetadata{Version: "2", Err: cmpopts.AnyError}, + }, + }, + rdsTargets[1]: {Raw: routeAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "2"}}, + }, + "type.googleapis.com/envoy.config.cluster.v3.Cluster": { + cdsTargets[0]: { + Raw: clusterAnys[0], + MD: 
xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "1", + ErrState: &xdsresource.UpdateErrorMetadata{Version: "2", Err: cmpopts.AnyError}, + }, + }, + cdsTargets[1]: {Raw: clusterAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "2"}}, + }, + "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment": { + edsTargets[0]: { + Raw: endpointAnys[0], + MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + Version: "1", + ErrState: &xdsresource.UpdateErrorMetadata{Version: "2", Err: cmpopts.AnyError}, + }, + }, + edsTargets[1]: {Raw: endpointAnys[1], MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "2"}}, + }, + } + if err := compareDump(ctx, client, want); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/tests/eds_watchers_test.go b/xds/internal/xdsclient/tests/eds_watchers_test.go new file mode 100644 index 000000000000..0d81c3848c8d --- /dev/null +++ b/xds/internal/xdsclient/tests/eds_watchers_test.go @@ -0,0 +1,853 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsclient_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +const ( + edsHost1 = "1.foo.bar.com" + edsHost2 = "2.foo.bar.com" + edsHost3 = "3.foo.bar.com" + edsPort1 = 1 + edsPort2 = 2 + edsPort3 = 3 +) + +type endpointsUpdateErrTuple struct { + update xdsresource.EndpointsUpdate + err error +} + +type endpointsWatcher struct { + updateCh *testutils.Channel +} + +func newEndpointsWatcher() *endpointsWatcher { + return &endpointsWatcher{updateCh: testutils.NewChannel()} +} + +func (ew *endpointsWatcher) OnUpdate(update *xdsresource.EndpointsResourceData) { + ew.updateCh.Send(endpointsUpdateErrTuple{update: update.Resource}) +} + +func (ew *endpointsWatcher) OnError(err error) { + ew.updateCh.SendOrFail(endpointsUpdateErrTuple{err: err}) +} + +func (ew *endpointsWatcher) OnResourceDoesNotExist() { + ew.updateCh.SendOrFail(endpointsUpdateErrTuple{err: xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Endpoints not found in received response")}) +} + +// badEndpointsResource returns a endpoints resource for the given +// 
edsServiceName which contains an endpoint with a load_balancing weight of +// `0`. This is expected to be NACK'ed by the xDS client. +func badEndpointsResource(edsServiceName string, host string, ports []uint32) *v3endpointpb.ClusterLoadAssignment { + e := e2e.DefaultEndpoint(edsServiceName, host, ports) + e.Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0} + return e +} + +// xdsClient is expected to produce an error containing this string when an +// update is received containing an endpoints resource created using +// `badEndpointsResource`. +const wantEndpointsNACKErr = "EDS response contains an endpoint with zero weight" + +// verifyEndpointsUpdate waits for an update to be received on the provided +// update channel and verifies that it matches the expected update. +// +// Returns an error if no update is received before the context deadline expires +// or the received update does not match the expected one. +func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate endpointsUpdateErrTuple) error { + u, err := updateCh.Receive(ctx) + if err != nil { + return fmt.Errorf("timeout when waiting for an endpoints resource from the management server: %v", err) + } + got := u.(endpointsUpdateErrTuple) + if wantUpdate.err != nil { + if gotType, wantType := xdsresource.ErrType(got.err), xdsresource.ErrType(wantUpdate.err); gotType != wantType { + return fmt.Errorf("received update with error type %v, want %v", gotType, wantType) + } + } + cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.EndpointsUpdate{}, "Raw")} + if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" { + return fmt.Errorf("received unexpected diff in the endpoints resource update: (-want, got):\n%s", diff) + } + return nil +} + +// verifyNoEndpointsUpdate verifies that no endpoints update is received on the +// provided update channel, and returns an error if an update is received. 
+// +// A very short deadline is used while waiting for the update, as this function +// is intended to be used when an update is not expected. +func verifyNoEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel) error { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + return fmt.Errorf("unexpected EndpointsUpdate: %v", u) + } + return nil +} + +// TestEDSWatch covers the case where a single endpoint exists for a single +// endpoints resource. The test verifies the following scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of the watch callback. +// 2. An update from the management server containing a resource *not* being +// watched should not result in the invocation of the watch callback. +// 3. After the watch is cancelled, an update from the management server +// containing the resource that was being watched should not result in the +// invocation of the watch callback. +// +// The test is run for old and new style names. +func (s) TestEDSWatch(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3endpointpb.ClusterLoadAssignment // The resource being watched. + updatedWatchedResource *v3endpointpb.ClusterLoadAssignment // The watched resource after an update. + notWatchedResource *v3endpointpb.ClusterLoadAssignment // A resource which is not being watched. 
+ wantUpdate endpointsUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: edsName, + watchedResource: e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}), + updatedWatchedResource: e2e.DefaultEndpoint(edsName, edsHost2, []uint32{edsPort2}), + notWatchedResource: e2e.DefaultEndpoint("unsubscribed-eds-resource", edsHost3, []uint32{edsPort3}), + wantUpdate: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + }, + { + desc: "new style resource", + resourceName: edsNameNewStyle, + watchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}), + updatedWatchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost2, []uint32{edsPort2}), + notWatchedResource: e2e.DefaultEndpoint("unsubscribed-eds-resource", edsHost3, []uint32{edsPort3}), + wantUpdate: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for a endpoint resource and have the watch + // callback push the received update on to a channel. + ew := newEndpointsWatcher() + edsCancel := xdsresource.WatchEndpoints(client, test.resourceName, ew) + + // Configure the management server to return a single endpoint + // resource, corresponding to the one being watched. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyEndpointsUpdate(ctx, ew.updateCh, test.wantUpdate); err != nil { + t.Fatal(err) + } + + // Configure the management server to return an additional endpoint + // resource, one that we are not interested in. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{test.watchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil { + t.Fatal(err) + } + + // Cancel the watch and update the resource corresponding to the original + // watch. Ensure that the cancelled watch callback is not invoked. 
+ edsCancel() + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{test.updatedWatchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestEDSWatch_TwoWatchesForSameResourceName covers the case where two watchers +// exist for a single endpoint resource. The test verifies the following +// scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of both watch callbacks. +// 2. After one of the watches is cancelled, a redundant update from the +// management server should not result in the invocation of either of the +// watch callbacks. +// 3. An update from the management server containing the resource being +// watched should result in the invocation of the un-cancelled watch +// callback. +// +// The test is run for old and new style names. +func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3endpointpb.ClusterLoadAssignment // The resource being watched. + updatedWatchedResource *v3endpointpb.ClusterLoadAssignment // The watched resource after an update. 
+ wantUpdateV1 endpointsUpdateErrTuple + wantUpdateV2 endpointsUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: edsName, + watchedResource: e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}), + updatedWatchedResource: e2e.DefaultEndpoint(edsName, edsHost2, []uint32{edsPort2}), + wantUpdateV1: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + wantUpdateV2: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost2, edsPort2), Weight: 1}}, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + }, + { + desc: "new style resource", + resourceName: edsNameNewStyle, + watchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}), + updatedWatchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost2, []uint32{edsPort2}), + wantUpdateV1: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, + }, + }, + }, + }, + wantUpdateV2: endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost2, edsPort2), Weight: 1}}, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + 
Weight: 1, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for the same endpoint resource and have the + // callbacks push the received updates on to a channel. + ew1 := newEndpointsWatcher() + edsCancel1 := xdsresource.WatchEndpoints(client, test.resourceName, ew1) + defer edsCancel1() + ew2 := newEndpointsWatcher() + edsCancel2 := xdsresource.WatchEndpoints(client, test.resourceName, ew2) + + // Configure the management server to return a single endpoint + // resource, corresponding to the one being watched. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyEndpointsUpdate(ctx, ew1.updateCh, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + if err := verifyEndpointsUpdate(ctx, ew2.updateCh, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + + // Cancel the second watch and force the management server to push a + // redundant update for the resource being watched. Neither of the + // two watch callbacks should be invoked. 
+ edsCancel2() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoEndpointsUpdate(ctx, ew1.updateCh); err != nil { + t.Fatal(err) + } + if err := verifyNoEndpointsUpdate(ctx, ew2.updateCh); err != nil { + t.Fatal(err) + } + + // Update to the resource being watched. The un-cancelled callback + // should be invoked while the cancelled one should not be. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{test.updatedWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyEndpointsUpdate(ctx, ew1.updateCh, test.wantUpdateV2); err != nil { + t.Fatal(err) + } + if err := verifyNoEndpointsUpdate(ctx, ew2.updateCh); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestEDSWatch_ThreeWatchesForDifferentResourceNames covers the case with three +// watchers (two watchers for one resource, and the third watcher for another +// resource), exist across two endpoint configuration resources. The test verifies +// that an update from the management server containing both resources results +// in the invocation of all watch callbacks. +// +// The test is run with both old and new style names. +func (s) TestEDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for the same endpoint resource and have the + // callbacks push the received updates on to a channel. + ew1 := newEndpointsWatcher() + edsCancel1 := xdsresource.WatchEndpoints(client, edsName, ew1) + defer edsCancel1() + ew2 := newEndpointsWatcher() + edsCancel2 := xdsresource.WatchEndpoints(client, edsName, ew2) + defer edsCancel2() + + // Register the third watch for a different endpoint resource. + ew3 := newEndpointsWatcher() + edsCancel3 := xdsresource.WatchEndpoints(client, edsNameNewStyle, ew3) + defer edsCancel3() + + // Configure the management server to return two endpoint resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{ + e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}), + e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the all watchers. The two + // resources returned differ only in the resource name. Therefore the + // expected update is the same for all the watchers. 
+ wantUpdate := endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, + }, + }, + }, + } + if err := verifyEndpointsUpdate(ctx, ew1.updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyEndpointsUpdate(ctx, ew3.updateCh, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestEDSWatch_ResourceCaching covers the case where a watch is registered for +// a resource which is already present in the cache. The test verifies that the +// watch callback is invoked with the contents from the cache, instead of a +// request being sent to the management server. +func (s) TestEDSWatch_ResourceCaching(t *testing.T) { + overrideFedEnvVar(t) + firstRequestReceived := false + firstAckReceived := grpcsync.NewEvent() + secondRequestReceived := grpcsync.NewEvent() + + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + // The first request has an empty version string. + if !firstRequestReceived && req.GetVersionInfo() == "" { + firstRequestReceived = true + return nil + } + // The first ack has a non-empty version string. + if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" { + firstAckReceived.Fire() + return nil + } + // Any requests after the first request and ack, are not expected. + secondRequestReceived.Fire() + return nil + }, + }) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for an endpoint resource and have the watch callback + // push the received update on to a channel. + ew1 := newEndpointsWatcher() + edsCancel1 := xdsresource.WatchEndpoints(client, edsName, ew1) + defer edsCancel1() + + // Configure the management server to return a single endpoint resource, + // corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, + }, + }, + }, + } + if err := verifyEndpointsUpdate(ctx, ew1.updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for receipt of ACK at the management server") + case <-firstAckReceived.Done(): + } + + // Register another watch for the same resource. This should get the update + // from the cache. 
+ ew2 := newEndpointsWatcher() + edsCancel2 := xdsresource.WatchEndpoints(client, edsName, ew2) + defer edsCancel2() + if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // No request should get sent out as part of this watch. + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case <-secondRequestReceived.Done(): + t.Fatal("xdsClient sent out request instead of using update from cache") + } +} + +// TestEDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client +// does not receive an EDS response for the request that it sends. The test +// verifies that the watch callback is invoked with an error once the +// watchExpiryTimer fires. +func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Register a watch for a resource which is expected to fail with an error + // after the watch expiry timer fires. + ew := newEndpointsWatcher() + edsCancel := xdsresource.WatchEndpoints(client, edsName, ew) + defer edsCancel() + + // Wait for the watch expiry timer to fire. + <-time.After(defaultTestWatchExpiryTimeout) + + // Verify that an empty update with the expected error is received. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "") + if err := verifyEndpointsUpdate(ctx, ew.updateCh, endpointsUpdateErrTuple{err: wantErr}); err != nil { + t.Fatal(err) + } +} + +// TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the +// client receives a valid EDS response for the request that it sends. The test +// verifies that the behavior associated with the expiry timer (i.e, callback +// invocation with error) does not take place. +func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Register a watch for an endpoint resource and have the watch callback + // push the received update on to a channel. + ew := newEndpointsWatcher() + edsCancel := xdsresource.WatchEndpoints(client, edsName, ew) + defer edsCancel() + + // Configure the management server to return a single endpoint resource, + // corresponding to the one we registered a watch for. 
+	resources := e2e.UpdateOptions{
+		NodeID:         nodeID,
+		Endpoints:      []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1})},
+		SkipValidation: true,
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	if err := mgmtServer.Update(ctx, resources); err != nil {
+		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
+	}
+
+	// Verify the contents of the received update.
+	wantUpdate := endpointsUpdateErrTuple{
+		update: xdsresource.EndpointsUpdate{
+			Localities: []xdsresource.Locality{
+				{
+					Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}},
+					ID: internal.LocalityID{
+						Region:  "region-1",
+						Zone:    "zone-1",
+						SubZone: "subzone-1",
+					},
+					Priority: 0,
+					Weight:   1,
+				},
+			},
+		},
+	}
+	if err := verifyEndpointsUpdate(ctx, ew.updateCh, wantUpdate); err != nil {
+		t.Fatal(err)
+	}
+
+	// Wait for the watch expiry timer to fire, and verify that the callback is
+	// not invoked.
+	<-time.After(defaultTestWatchExpiryTimeout)
+	if err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestEDSWatch_NACKError covers the case where an update from the management
+// server is NACK'ed by the xdsclient. The test verifies that the error is
+// propagated to the watcher.
+func (s) TestEDSWatch_NACKError(t *testing.T) {
+	overrideFedEnvVar(t)
+	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
+	defer cleanup()
+
+	// Create an xDS client with the above bootstrap contents.
+	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
+	if err != nil {
+		t.Fatalf("Failed to create xDS client: %v", err)
+	}
+	defer close()
+
+	// Register a watch for an endpoint resource and have the watch
+	// callback push the received update on to a channel.
+	ew := newEndpointsWatcher()
+	edsCancel := xdsresource.WatchEndpoints(client, edsName, ew)
+	defer edsCancel()
+
+	// Configure the management server to return a single endpoints
+	// resource which is expected to be NACKed by the client.
+	resources := e2e.UpdateOptions{
+		NodeID:         nodeID,
+		Endpoints:      []*v3endpointpb.ClusterLoadAssignment{badEndpointsResource(edsName, edsHost1, []uint32{edsPort1})},
+		SkipValidation: true,
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	if err := mgmtServer.Update(ctx, resources); err != nil {
+		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
+	}
+
+	// Verify that the expected error is propagated to the watcher.
+	u, err := ew.updateCh.Receive(ctx)
+	if err != nil {
+		t.Fatalf("timeout when waiting for an endpoints resource from the management server: %v", err)
+	}
+	gotErr := u.(endpointsUpdateErrTuple).err
+	if gotErr == nil || !strings.Contains(gotErr.Error(), wantEndpointsNACKErr) {
+		t.Fatalf("update received with error: %v, want %q", gotErr, wantEndpointsNACKErr)
+	}
+}
+
+// TestEDSWatch_PartialValid covers the case where a response from the
+// management server contains both valid and invalid resources and is expected
+// to be NACK'ed by the xdsclient. The test verifies that watchers corresponding
+// to the valid resource receive the update, while watchers corresponding to the
+// invalid resource receive an error.
+func (s) TestEDSWatch_PartialValid(t *testing.T) {
+	overrideFedEnvVar(t)
+	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
+	defer cleanup()
+
+	// Create an xDS client with the above bootstrap contents.
+	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
+	if err != nil {
+		t.Fatalf("Failed to create xDS client: %v", err)
+	}
+	defer close()
+
+	// Register two watches for two endpoint resources.
The first watch is + // expected to receive an error because the received resource is NACKed. + // The second watch is expected to get a good update. + badResourceName := rdsName + ew1 := newEndpointsWatcher() + edsCancel1 := xdsresource.WatchEndpoints(client, badResourceName, ew1) + defer edsCancel1() + goodResourceName := ldsNameNewStyle + ew2 := newEndpointsWatcher() + edsCancel2 := xdsresource.WatchEndpoints(client, goodResourceName, ew2) + defer edsCancel2() + + // Configure the management server to return two endpoints resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{ + badEndpointsResource(badResourceName, edsHost1, []uint32{edsPort1}), + e2e.DefaultEndpoint(goodResourceName, edsHost1, []uint32{edsPort1}), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher which + // requested for the bad resource. + u, err := ew1.updateCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for an endpoints resource from the management server: %v", err) + } + gotErr := u.(endpointsUpdateErrTuple).err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantEndpointsNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantEndpointsNACKErr) + } + + // Verify that the watcher watching the good resource receives an update. 
+ wantUpdate := endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}}, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + Priority: 0, + Weight: 1, + }, + }, + }, + } + if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/tests/federation_watchers_test.go b/xds/internal/xdsclient/tests/federation_watchers_test.go new file mode 100644 index 000000000000..ed59b63ac794 --- /dev/null +++ b/xds/internal/xdsclient/tests/federation_watchers_test.go @@ -0,0 +1,324 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsclient_test + +import ( + "context" + "fmt" + "testing" + + "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" +) + +const testNonDefaultAuthority = "non-default-authority" + +// setupForFederationWatchersTest spins up two management servers, one for the +// default (empty) authority and another for a non-default authority. +// +// Returns the management server associated with the non-default authority, the +// nodeID to use, and the xDS client. +func setupForFederationWatchersTest(t *testing.T) (*e2e.ManagementServer, string, xdsclient.XDSClient) { + overrideFedEnvVar(t) + + // Start a management server as the default authority. + serverDefaultAuthority, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + t.Cleanup(serverDefaultAuthority.Stop) + + // Start another management server as the other authority. 
+ serverNonDefaultAuthority, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + t.Cleanup(serverNonDefaultAuthority.Stop) + + nodeID := uuid.New().String() + bootstrapContents, err := bootstrap.Contents(bootstrap.Options{ + NodeID: nodeID, + ServerURI: serverDefaultAuthority.Address, + ServerListenerResourceNameTemplate: e2e.ServerListenerResourceNameTemplate, + // Specify the address of the non-default authority. + Authorities: map[string]string{testNonDefaultAuthority: serverNonDefaultAuthority.Address}, + }) + if err != nil { + t.Fatalf("Failed to create bootstrap file: %v", err) + } + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + t.Cleanup(close) + return serverNonDefaultAuthority, nodeID, client +} + +// TestFederation_ListenerResourceContextParamOrder covers the case of watching +// a Listener resource with the new style resource name and context parameters. +// The test registers watches for two resources which differ only in the order +// of context parameters in their URI. The server is configured to respond with +// a single resource with canonicalized context parameters. The test verifies +// that both watchers are notified. +func (s) TestFederation_ListenerResourceContextParamOrder(t *testing.T) { + serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) + + var ( + // Two resource names only differ in context parameter order. 
+ resourceName1 = fmt.Sprintf("xdstp://%s/envoy.config.listener.v3.Listener/xdsclient-test-lds-resource?a=1&b=2", testNonDefaultAuthority) + resourceName2 = fmt.Sprintf("xdstp://%s/envoy.config.listener.v3.Listener/xdsclient-test-lds-resource?b=2&a=1", testNonDefaultAuthority) + ) + + // Register two watches for listener resources with the same query string, + // but context parameters in different order. + updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(resourceName1, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(resourceName2, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + + // Configure the management server for the non-default authority to return a + // single listener resource, corresponding to the watches registered above. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(resourceName1, "rds-resource")}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := serverNonDefaultAuthority.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: "rds-resource", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + // Verify the contents of the received update. 
+ if err := verifyListenerUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestFederation_RouteConfigResourceContextParamOrder covers the case of +// watching a RouteConfiguration resource with the new style resource name and +// context parameters. The test registers watches for two resources which +// differ only in the order of context parameters in their URI. The server is +// configured to respond with a single resource with canonicalized context +// parameters. The test verifies that both watchers are notified. +func (s) TestFederation_RouteConfigResourceContextParamOrder(t *testing.T) { + serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) + + var ( + // Two resource names only differ in context parameter order. + resourceName1 = fmt.Sprintf("xdstp://%s/envoy.config.route.v3.RouteConfiguration/xdsclient-test-rds-resource?a=1&b=2", testNonDefaultAuthority) + resourceName2 = fmt.Sprintf("xdstp://%s/envoy.config.route.v3.RouteConfiguration/xdsclient-test-rds-resource?b=2&a=1", testNonDefaultAuthority) + ) + + // Register two watches for route configuration resources with the same + // query string, but context parameters in different order. + updateCh1 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(resourceName1, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel1() + updateCh2 := testutils.NewChannel() + rdsCancel2 := client.WatchRouteConfig(resourceName2, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel2() + + // Configure the management server for the non-default authority to return a + // single route config resource, corresponding to the watches registered. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(resourceName1, "listener-resource", "cluster-resource")}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := serverNonDefaultAuthority.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + wantUpdate := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{"listener-resource"}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{"cluster-resource": {Weight: 100}}, + }, + }, + }, + }, + }, + } + // Verify the contents of the received update. + if err := verifyRouteConfigUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestFederation_ClusterResourceContextParamOrder covers the case of watching a +// Cluster resource with the new style resource name and context parameters. +// The test registers watches for two resources which differ only in the order +// of context parameters in their URI. The server is configured to respond with +// a single resource with canonicalized context parameters. The test verifies +// that both watchers are notified. +func (s) TestFederation_ClusterResourceContextParamOrder(t *testing.T) { + serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) + + var ( + // Two resource names only differ in context parameter order. 
+ resourceName1 = fmt.Sprintf("xdstp://%s/envoy.config.cluster.v3.Cluster/xdsclient-test-cds-resource?a=1&b=2", testNonDefaultAuthority) + resourceName2 = fmt.Sprintf("xdstp://%s/envoy.config.cluster.v3.Cluster/xdsclient-test-cds-resource?b=2&a=1", testNonDefaultAuthority) + ) + + // Register two watches for cluster resources with the same query string, + // but context parameters in different order. + updateCh1 := testutils.NewChannel() + cdsCancel1 := client.WatchCluster(resourceName1, func(u xdsresource.ClusterUpdate, err error) { + updateCh1.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel1() + updateCh2 := testutils.NewChannel() + cdsCancel2 := client.WatchCluster(resourceName2, func(u xdsresource.ClusterUpdate, err error) { + updateCh2.Send(xdsresource.ClusterUpdateErrTuple{Update: u, Err: err}) + }) + defer cdsCancel2() + + // Configure the management server for the non-default authority to return a + // single cluster resource, corresponding to the watches registered. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Clusters: []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName1, "eds-service-name", e2e.SecurityLevelNone)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := serverNonDefaultAuthority.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + wantUpdate := xdsresource.ClusterUpdateErrTuple{ + Update: xdsresource.ClusterUpdate{ + ClusterName: "xdstp://non-default-authority/envoy.config.cluster.v3.Cluster/xdsclient-test-cds-resource?a=1&b=2", + EDSServiceName: "eds-service-name", + }, + } + // Verify the contents of the received update. 
+ if err := verifyClusterUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyClusterUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestFederation_EndpointsResourceContextParamOrder covers the case of watching +// an Endpoints resource with the new style resource name and context parameters. +// The test registers watches for two resources which differ only in the order +// of context parameters in their URI. The server is configured to respond with +// a single resource with canonicalized context parameters. The test verifies +// that both watchers are notified. +func (s) TestFederation_EndpointsResourceContextParamOrder(t *testing.T) { + serverNonDefaultAuthority, nodeID, client := setupForFederationWatchersTest(t) + + var ( + // Two resource names only differ in context parameter order. + resourceName1 = fmt.Sprintf("xdstp://%s/envoy.config.endpoint.v3.ClusterLoadAssignment/xdsclient-test-eds-resource?a=1&b=2", testNonDefaultAuthority) + resourceName2 = fmt.Sprintf("xdstp://%s/envoy.config.endpoint.v3.ClusterLoadAssignment/xdsclient-test-eds-resource?b=2&a=1", testNonDefaultAuthority) + ) + + // Register two watches for endpoint resources with the same query string, + // but context parameters in different order. + ew1 := newEndpointsWatcher() + edsCancel1 := xdsresource.WatchEndpoints(client, resourceName1, ew1) + defer edsCancel1() + ew2 := newEndpointsWatcher() + edsCancel2 := xdsresource.WatchEndpoints(client, resourceName2, ew2) + defer edsCancel2() + + // Configure the management server for the non-default authority to return a + // single endpoints resource, corresponding to the watches registered. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Endpoints: []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(resourceName1, "localhost", []uint32{666})}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := serverNonDefaultAuthority.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + wantUpdate := endpointsUpdateErrTuple{ + update: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: "localhost:666", Weight: 1}}, + Weight: 1, + ID: internal.LocalityID{ + Region: "region-1", + Zone: "zone-1", + SubZone: "subzone-1", + }, + }, + }, + }, + } + // Verify the contents of the received update. + if err := verifyEndpointsUpdate(ctx, ew1.updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil { + t.Fatal(err) + } +} + +func newStringP(s string) *string { + return &s +} diff --git a/xds/internal/xdsclient/tests/lds_watchers_test.go b/xds/internal/xdsclient/tests/lds_watchers_test.go new file mode 100644 index 000000000000..7e41a81361ff --- /dev/null +++ b/xds/internal/xdsclient/tests/lds_watchers_test.go @@ -0,0 +1,1009 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsclient_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/envoyproxy/go-control-plane/pkg/wellknown" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + + _ "google.golang.org/grpc/xds" // To ensure internal.NewXDSResolverWithConfigForTesting is set. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. 
+) + +func overrideFedEnvVar(t *testing.T) { + oldFed := envconfig.XDSFederation + envconfig.XDSFederation = true + t.Cleanup(func() { envconfig.XDSFederation = oldFed }) +} + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + defaultTestWatchExpiryTimeout = 500 * time.Millisecond + defaultTestIdleAuthorityTimeout = 50 * time.Millisecond + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond // For events expected to *not* happen. + + ldsName = "xdsclient-test-lds-resource" + rdsName = "xdsclient-test-rds-resource" + cdsName = "xdsclient-test-cds-resource" + edsName = "xdsclient-test-eds-resource" + ldsNameNewStyle = "xdstp:///envoy.config.listener.v3.Listener/xdsclient-test-lds-resource" + rdsNameNewStyle = "xdstp:///envoy.config.route.v3.RouteConfiguration/xdsclient-test-rds-resource" + cdsNameNewStyle = "xdstp:///envoy.config.cluster.v3.Cluster/xdsclient-test-cds-resource" + edsNameNewStyle = "xdstp:///envoy.config.endpoint.v3.ClusterLoadAssignment/xdsclient-test-eds-resource" +) + +// badListenerResource returns a listener resource for the given name which does +// not contain the `RouteSpecifier` field in the HTTPConnectionManager, and +// hence is expected to be NACKed by the client. 
+func badListenerResource(name string) *v3listenerpb.Listener { + hcm := testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("router", &v3routerpb.Router{})}, + }) + return &v3listenerpb.Listener{ + Name: name, + ApiListener: &v3listenerpb.ApiListener{ApiListener: hcm}, + FilterChains: []*v3listenerpb.FilterChain{{ + Name: "filter-chain-name", + Filters: []*v3listenerpb.Filter{{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &v3listenerpb.Filter_TypedConfig{TypedConfig: hcm}, + }}, + }}, + } +} + +// xdsClient is expected to produce an error containing this string when an +// update is received containing a listener created using `badListenerResource`. +const wantListenerNACKErr = "no RouteSpecifier" + +// verifyNoListenerUpdate verifies that no listener update is received on the +// provided update channel, and returns an error if an update is received. +// +// A very short deadline is used while waiting for the update, as this function +// is intended to be used when an update is not expected. +func verifyNoListenerUpdate(ctx context.Context, updateCh *testutils.Channel) error { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + return fmt.Errorf("unexpected ListenerUpdate: %v", u) + } + return nil +} + +// verifyListenerUpdate waits for an update to be received on the provided +// update channel and verifies that it matches the expected update. +// +// Returns an error if no update is received before the context deadline expires +// or the received update does not match the expected one. 
+func verifyListenerUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.ListenerUpdateErrTuple) error {
+	u, err := updateCh.Receive(ctx)
+	if err != nil {
+		return fmt.Errorf("timeout when waiting for a listener resource from the management server: %v", err)
+	}
+	got := u.(xdsresource.ListenerUpdateErrTuple)
+	if wantUpdate.Err != nil {
+		if gotType, wantType := xdsresource.ErrType(got.Err), xdsresource.ErrType(wantUpdate.Err); gotType != wantType {
+			return fmt.Errorf("received update with error type %v, want %v", gotType, wantType)
+		}
+	}
+	cmpOpts := []cmp.Option{
+		cmpopts.EquateEmpty(),
+		cmpopts.IgnoreFields(xdsresource.HTTPFilter{}, "Filter", "Config"),
+		cmpopts.IgnoreFields(xdsresource.ListenerUpdate{}, "Raw"),
+	}
+	if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" {
+		return fmt.Errorf("received unexpected diff in the listener resource update: (-want, got):\n%s", diff)
+	}
+	return nil
+}
+
+// TestLDSWatch covers the case where a single watcher exists for a single
+// listener resource. The test verifies the following scenarios:
+// 1. An update from the management server containing the resource being
+//    watched should result in the invocation of the watch callback.
+// 2. An update from the management server containing a resource *not* being
+//    watched should not result in the invocation of the watch callback.
+// 3. After the watch is cancelled, an update from the management server
+//    containing the resource that was being watched should not result in the
+//    invocation of the watch callback.
+//
+// The test is run for old and new style names.
+func (s) TestLDSWatch(t *testing.T) {
+	tests := []struct {
+		desc                   string
+		resourceName           string
+		watchedResource        *v3listenerpb.Listener // The resource being watched.
+		updatedWatchedResource *v3listenerpb.Listener // The watched resource after an update.
+		notWatchedResource     *v3listenerpb.Listener // A resource which is not being watched.
+ wantUpdate xdsresource.ListenerUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: ldsName, + watchedResource: e2e.DefaultClientListener(ldsName, rdsName), + updatedWatchedResource: e2e.DefaultClientListener(ldsName, "new-rds-resource"), + notWatchedResource: e2e.DefaultClientListener("unsubscribed-lds-resource", rdsName), + wantUpdate: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + }, + { + desc: "new style resource", + resourceName: ldsNameNewStyle, + watchedResource: e2e.DefaultClientListener(ldsNameNewStyle, rdsNameNewStyle), + updatedWatchedResource: e2e.DefaultClientListener(ldsNameNewStyle, "new-rds-resource"), + notWatchedResource: e2e.DefaultClientListener("unsubscribed-lds-resource", rdsNameNewStyle), + wantUpdate: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsNameNewStyle, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for a listener resource and have the watch + // callback push the received update on to a channel. 
+ updateCh := testutils.NewChannel() + ldsCancel := client.WatchListener(test.resourceName, func(u xdsresource.ListenerUpdate, err error) { + updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single listener + // resource, corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyListenerUpdate(ctx, updateCh, test.wantUpdate); err != nil { + t.Fatal(err) + } + + // Configure the management server to return an additional listener + // resource, one that we are not interested in. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{test.watchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoListenerUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + + // Cancel the watch and update the resource corresponding to the original + // watch. Ensure that the cancelled watch callback is not invoked. 
+ ldsCancel() + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{test.updatedWatchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoListenerUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestLDSWatch_TwoWatchesForSameResourceName covers the case where two watchers +// exist for a single listener resource. The test verifies the following +// scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of both watch callbacks. +// 2. After one of the watches is cancelled, a redundant update from the +// management server should not result in the invocation of either of the +// watch callbacks. +// 3. An update from the management server containing the resource being +// watched should result in the invocation of the un-cancelled watch +// callback. +// +// The test is run for old and new style names. +func (s) TestLDSWatch_TwoWatchesForSameResourceName(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3listenerpb.Listener // The resource being watched. + updatedWatchedResource *v3listenerpb.Listener // The watched resource after an update. 
+ wantUpdateV1 xdsresource.ListenerUpdateErrTuple + wantUpdateV2 xdsresource.ListenerUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: ldsName, + watchedResource: e2e.DefaultClientListener(ldsName, rdsName), + updatedWatchedResource: e2e.DefaultClientListener(ldsName, "new-rds-resource"), + wantUpdateV1: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + wantUpdateV2: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: "new-rds-resource", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + }, + { + desc: "new style resource", + resourceName: ldsNameNewStyle, + watchedResource: e2e.DefaultClientListener(ldsNameNewStyle, rdsNameNewStyle), + updatedWatchedResource: e2e.DefaultClientListener(ldsNameNewStyle, "new-rds-resource"), + wantUpdateV1: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsNameNewStyle, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + wantUpdateV2: xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: "new-rds-resource", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for the same listener resource and have the + // callbacks push the received updates on to a channel. 
+ updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(test.resourceName, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(test.resourceName, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single listener + // resource, corresponding to the one we registered watches for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyListenerUpdate(ctx, updateCh1, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + if err := verifyListenerUpdate(ctx, updateCh2, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + + // Cancel the second watch and force the management server to push a + // redundant update for the resource being watched. Neither of the + // two watch callbacks should be invoked. + ldsCancel2() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoListenerUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + if err := verifyNoListenerUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update to the resource being watched. The un-cancelled callback + // should be invoked while the cancelled one should not be. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{test.updatedWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyListenerUpdate(ctx, updateCh1, test.wantUpdateV2); err != nil { + t.Fatal(err) + } + if err := verifyNoListenerUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestLDSWatch_ThreeWatchesForDifferentResourceNames covers the case with three +// watchers (two watchers for one resource, and the third watcher for another +// resource), exist across two listener resources. The test verifies that an +// update from the management server containing both resources results in the +// invocation of all watch callbacks. +// +// The test is run with both old and new style names. +func (s) TestLDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for the same listener resource and have the + // callbacks push the received updates on to a channel. 
+ updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + + // Register the third watch for a different listener resource. + updateCh3 := testutils.NewChannel() + ldsCancel3 := client.WatchListener(ldsNameNewStyle, func(u xdsresource.ListenerUpdate, err error) { + updateCh3.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel3() + + // Configure the management server to return two listener resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + e2e.DefaultClientListener(ldsName, rdsName), + e2e.DefaultClientListener(ldsNameNewStyle, rdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for all the watchers. The two + // resources returned differ only in the resource name. Therefore the + // expected update is the same for all the watchers. 
+ wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyListenerUpdate(ctx, updateCh3, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestLDSWatch_ResourceCaching covers the case where a watch is registered for +// a resource which is already present in the cache. The test verifies that the +// watch callback is invoked with the contents from the cache, instead of a +// request being sent to the management server. +func (s) TestLDSWatch_ResourceCaching(t *testing.T) { + overrideFedEnvVar(t) + firstRequestReceived := false + firstAckReceived := grpcsync.NewEvent() + secondRequestReceived := grpcsync.NewEvent() + + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + // The first request has an empty version string. + if !firstRequestReceived && req.GetVersionInfo() == "" { + firstRequestReceived = true + return nil + } + // The first ack has a non-empty version string. + if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" { + firstAckReceived.Fire() + return nil + } + // Any requests after the first request and ack, are not expected. + secondRequestReceived.Fire() + return nil + }, + }) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for a listener resource and have the watch + // callback push the received update on to a channel. 
+ updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + + // Configure the management server to return a single listener + // resource, corresponding to the one we registered a watch for. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for receipt of ACK at the management server") + case <-firstAckReceived.Done(): + } + + // Register another watch for the same resource. This should get the update + // from the cache. + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + // No request should get sent out as part of this watch. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case <-secondRequestReceived.Done(): + t.Fatal("xdsClient sent out request instead of using update from cache") + } +} + +// TestLDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client +// does not receive an LDS response for the request that it sends. The test +// verifies that the watch callback is invoked with an error once the +// watchExpiryTimer fires. +func (s) TestLDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Register a watch for a resource which is expected to fail with an error + // after the watch expiry timer fires. + updateCh := testutils.NewChannel() + ldsCancel := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel() + + // Wait for the watch expiry timer to fire. + <-time.After(defaultTestWatchExpiryTimeout) + + // Verify that an empty update with the expected error is received. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "") + if err := verifyListenerUpdate(ctx, updateCh, xdsresource.ListenerUpdateErrTuple{Err: wantErr}); err != nil { + t.Fatal(err) + } +} + +// TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the +// client receives a valid LDS response for the request that it sends. The test +// verifies that the behavior associated with the expiry timer (i.e, callback +// invocation with error) does not take place. +func (s) TestLDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Register a watch for a listener resource and have the watch + // callback push the received update on to a channel. + updateCh := testutils.NewChannel() + ldsCancel := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel() + + // Configure the management server to return a single listener + // resource, corresponding to the one we registered a watch for. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(ldsName, rdsName)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // Wait for the watch expiry timer to fire, and verify that the callback is + // not invoked. + <-time.After(defaultTestWatchExpiryTimeout) + if err := verifyNoListenerUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } +} + +// TestLDSWatch_ResourceRemoved covers the cases where a resource being watched +// is removed from the management server. The test verifies the following +// scenarios: +// 1. Removing a resource should trigger the watch callback with a resource +// removed error. It should not trigger the watch callback for an unrelated +// resource. +// 2. An update to another resource should result in the invocation of the watch +// callback associated with that resource. It should not result in the +// invocation of the watch callback associated with the deleted resource. +// +// The test is run with both old and new style names. +func (s) TestLDSWatch_ResourceRemoved(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for two listener resources and have the + // callbacks push the received updates on to a channel. + resourceName1 := ldsName + updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(resourceName1, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + + resourceName2 := ldsNameNewStyle + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(resourceName2, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + + // Configure the management server to return two listener resources, + // corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + e2e.DefaultClientListener(resourceName1, rdsName), + e2e.DefaultClientListener(resourceName2, rdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for both watchers. The two + // resources returned differ only in the resource name. Therefore the + // expected update is the same for both watchers. 
+ wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + + // Remove the first listener resource on the management server. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(resourceName2, rdsName)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // The first watcher should receive a resource removed error, while the + // second watcher should not see an update. + if err := verifyListenerUpdate(ctx, updateCh1, xdsresource.ListenerUpdateErrTuple{ + Err: xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, ""), + }); err != nil { + t.Fatal(err) + } + if err := verifyNoListenerUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update the second listener resource on the management server. The first + // watcher should not see an update, while the second watcher should. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{e2e.DefaultClientListener(resourceName2, "new-rds-resource")}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoListenerUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + wantUpdate = xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: "new-rds-resource", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestLDSWatch_NACKError covers the case where an update from the management +// server is NACK'ed by the xdsclient. The test verifies that the error is +// propagated to the watcher. +func (s) TestLDSWatch_NACKError(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for a listener resource and have the watch + // callback push the received update on to a channel. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + updateCh := testutils.NewChannel() + ldsCancel := client.WatchListener(ldsName, func(u xdsresource.ListenerUpdate, err error) { + updateCh.SendContext(ctx, xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel() + + // Configure the management server to return a single listener resource + // which is expected to be NACKed by the client. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{badListenerResource(ldsName)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher. + u, err := updateCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a listener resource from the management server: %v", err) + } + gotErr := u.(xdsresource.ListenerUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantListenerNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantListenerNACKErr) + } +} + +// TestLDSWatch_PartialValid covers the case where a response from the +// management server contains both valid and invalid resources and is expected +// to be NACK'ed by the xdsclient. The test verifies that watchers corresponding +// to the valid resource receive the update, while watchers corresponding to the +// invalid resource receive an error. +func (s) TestLDSWatch_PartialValid(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for listener resources. The first watch is expected + // to receive an error because the received resource is NACKed. The second + // watch is expected to get a good update. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + badResourceName := ldsName + updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(badResourceName, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.SendContext(ctx, xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + goodResourceName := ldsNameNewStyle + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(goodResourceName, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.SendContext(ctx, xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + + // Configure the management server with two listener resources. One of these + // is a bad resource causing the update to be NACKed. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + badListenerResource(badResourceName), + e2e.DefaultClientListener(goodResourceName, rdsName), + }, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher which + // requested for the bad resource. + u, err := updateCh1.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a listener resource from the management server: %v", err) + } + gotErr := u.(xdsresource.ListenerUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantListenerNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantListenerNACKErr) + } + + // Verify that the watcher watching the good resource receives a good + // update. 
+ wantUpdate := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestLDSWatch_PartialResponse covers the case where a response from the +// management server does not contain all requested resources. LDS responses are +// supposed to contain all requested resources, and the absence of one usually +// indicates that the management server does not know about it. In cases where +// the server has never responded with this resource before, the xDS client is +// expected to wait for the watch timeout to expire before concluding that the +// resource does not exist on the server +func (s) TestLDSWatch_PartialResponse(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for two listener resources and have the + // callbacks push the received updates on to a channel. 
+ resourceName1 := ldsName + updateCh1 := testutils.NewChannel() + ldsCancel1 := client.WatchListener(resourceName1, func(u xdsresource.ListenerUpdate, err error) { + updateCh1.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel1() + + resourceName2 := ldsNameNewStyle + updateCh2 := testutils.NewChannel() + ldsCancel2 := client.WatchListener(resourceName2, func(u xdsresource.ListenerUpdate, err error) { + updateCh2.Send(xdsresource.ListenerUpdateErrTuple{Update: u, Err: err}) + }) + defer ldsCancel2() + + // Configure the management server to return only one of the two listener + // resources, corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + e2e.DefaultClientListener(resourceName1, rdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for first watcher. + wantUpdate1 := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh1, wantUpdate1); err != nil { + t.Fatal(err) + } + + // Verify that the second watcher does not get an update with an error. + if err := verifyNoListenerUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Configure the management server to return two listener resources, + // corresponding to the registered watches. 
+ resources = e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{ + e2e.DefaultClientListener(resourceName1, rdsName), + e2e.DefaultClientListener(resourceName2, rdsName), + }, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the second watcher. + wantUpdate2 := xdsresource.ListenerUpdateErrTuple{ + Update: xdsresource.ListenerUpdate{ + RouteConfigName: rdsName, + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + } + if err := verifyListenerUpdate(ctx, updateCh2, wantUpdate2); err != nil { + t.Fatal(err) + } + + // Verify that the first watcher gets no update, as the first resource did + // not change. + if err := verifyNoListenerUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/tests/misc_watchers_test.go b/xds/internal/xdsclient/tests/misc_watchers_test.go new file mode 100644 index 000000000000..19cf7daba3fe --- /dev/null +++ b/xds/internal/xdsclient/tests/misc_watchers_test.go @@ -0,0 +1,311 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsclient_test + +import ( + "context" + "fmt" + "testing" + + "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" + "google.golang.org/grpc/xds/internal" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" + + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +var ( + // Resource type implementations retrieved from the resource type map in the + // internal package, which is initialized when the individual resource types + // are created. + listenerResourceType = internal.ResourceTypeMapForTesting[version.V3ListenerURL].(xdsresource.Type) + routeConfigResourceType = internal.ResourceTypeMapForTesting[version.V3RouteConfigURL].(xdsresource.Type) +) + +// TestWatchCallAnotherWatch tests the scenario where a watch is registered for +// a resource, and more watches are registered from the first watch's callback. +// The test verifies that this scenario does not lead to a deadlock. +func (s) TestWatchCallAnotherWatch(t *testing.T) { + overrideFedEnvVar(t) + + // Start an xDS management server and set the option to allow it to respond + // to requests which only specify a subset of the configured resources. + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{AllowResourceSubset: true}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Configure the management server to respond with route config resources. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{ + e2e.DefaultRouteConfig(rdsName, ldsName, cdsName), + e2e.DefaultRouteConfig(rdsNameNewStyle, ldsNameNewStyle, cdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Start a watch for one route configuration resource. From the watch + // callback of the first resource, register two more watches (one for the + // same resource name, which would be satisfied from the cache, and another + // for a different resource name, which would be satisfied from the server). + updateCh1 := testutils.NewChannel() + updateCh2 := testutils.NewChannel() + updateCh3 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + + // Watch for the same resource name. + rdsCancel2 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + t.Cleanup(rdsCancel2) + // Watch for a different resource name. + rdsCancel3 := client.WatchRouteConfig(rdsNameNewStyle, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh3.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + t.Cleanup(rdsCancel3) + }) + t.Cleanup(rdsCancel1) + + // Verify the contents of the received update for the all watchers. 
+ wantUpdate12 := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, + }, + }, + }, + }, + }, + } + wantUpdate3 := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsNameNewStyle}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, + }, + }, + }, + }, + }, + } + if err := verifyRouteConfigUpdate(ctx, updateCh1, wantUpdate12); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh2, wantUpdate12); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh3, wantUpdate3); err != nil { + t.Fatal(err) + } +} + +// TestNodeProtoSentOnlyInFirstRequest verifies that a non-empty node proto gets +// sent only on the first discovery request message on the ADS stream. +// +// It also verifies the same behavior holds after a stream restart. +func (s) TestNodeProtoSentOnlyInFirstRequest(t *testing.T) { + overrideFedEnvVar(t) + + // Create a restartable listener which can close existing connections. + l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + lis := testutils.NewRestartableListener(l) + + // Start a fake xDS management server with the above restartable listener. + // + // We are unable to use the go-control-plane server here, because it caches + // the node proto received in the first request message and adds it to + // subsequent requests before invoking the OnStreamRequest() callback. 
+	// Therefore we cannot verify what is sent by the xDS client.
+	mgmtServer, cleanup, err := fakeserver.StartServer(lis)
+	if err != nil {
+		t.Fatalf("Failed to start fake xDS server: %v", err)
+	}
+	defer cleanup()
+
+	// Create a bootstrap file in a temporary directory.
+	nodeID := uuid.New().String()
+	bootstrapContents, err := bootstrap.Contents(bootstrap.Options{
+		NodeID:    nodeID,
+		ServerURI: mgmtServer.Address,
+	})
+	if err != nil {
+		t.Fatalf("Failed to create bootstrap file: %v", err)
+	}
+
+	// Create an xDS client with the above bootstrap contents.
+	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
+	if err != nil {
+		t.Fatalf("Failed to create xDS client: %v", err)
+	}
+	defer close()
+
+	const (
+		serviceName     = "my-service-client-side-xds"
+		routeConfigName = "route-" + serviceName
+		clusterName     = "cluster-" + serviceName
+	)
+
+	// Register a watch for the Listener resource.
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
+	defer cancel()
+	watcher := xdstestutils.NewTestResourceWatcher()
+	client.WatchResource(listenerResourceType, serviceName, watcher)
+
+	// Ensure the watch results in a discovery request with a non-empty node proto.
+	if err := readDiscoveryResponseAndCheckForNonEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil {
+		t.Fatal(err)
+	}
+
+	// Configure a listener resource on the fake xDS server.
+	lisAny, err := anypb.New(e2e.DefaultClientListener(serviceName, routeConfigName))
+	if err != nil {
+		t.Fatalf("Failed to marshal listener resource into an Any proto: %v", err)
+	}
+	mgmtServer.XDSResponseChan <- &fakeserver.Response{
+		Resp: &v3discoverypb.DiscoveryResponse{
+			TypeUrl:     "type.googleapis.com/envoy.config.listener.v3.Listener",
+			VersionInfo: "1",
+			Resources:   []*anypb.Any{lisAny},
+		},
+	}
+
+	// The xDS client is expected to ACK the Listener resource. The discovery
+	// request corresponding to the ACK must contain a nil node proto.
+ if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } + + // Register a watch for a RouteConfiguration resource. + client.WatchResource(routeConfigResourceType, routeConfigName, watcher) + + // Ensure the watch results in a discovery request with an empty node proto. + if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } + + // Configure the route configuration resource on the fake xDS server. + rcAny, err := anypb.New(e2e.DefaultRouteConfig(routeConfigName, serviceName, clusterName)) + if err != nil { + t.Fatalf("Failed to marshal route configuration resource into an Any proto: %v", err) + } + mgmtServer.XDSResponseChan <- &fakeserver.Response{ + Resp: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{rcAny}, + }, + } + + // Ensure the discovery request for the ACK contains an empty node proto. + if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } + + // Stop the management server and expect the error callback to be invoked. + lis.Stop() + select { + case <-ctx.Done(): + t.Fatal("Timeout when waiting for the connection error to be propagated to the watcher") + case <-watcher.ErrorCh: + } + + // Restart the management server. + lis.Restart() + + // The xDS client is expected to re-request previously requested resources. + // Hence, we expect two DiscoveryRequest messages (one for the Listener and + // one for the RouteConfiguration resource). The first message should contain + // a non-nil node proto and the second should contain a nil-proto. + // + // And since we don't push any responses on the response channel of the fake + // server, we do not expect any ACKs here. 
+ if err := readDiscoveryResponseAndCheckForNonEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } + if err := readDiscoveryResponseAndCheckForEmptyNodeProto(ctx, mgmtServer.XDSRequestChan); err != nil { + t.Fatal(err) + } +} + +// readDiscoveryResponseAndCheckForEmptyNodeProto reads a discovery request +// message out of the provided reqCh. It returns an error if it fails to read a +// message before the context deadline expires, or if the read message contains +// a non-empty node proto. +func readDiscoveryResponseAndCheckForEmptyNodeProto(ctx context.Context, reqCh *testutils.Channel) error { + v, err := reqCh.Receive(ctx) + if err != nil { + return fmt.Errorf("Timeout when waiting for a DiscoveryRequest message") + } + req := v.(*fakeserver.Request).Req.(*v3discoverypb.DiscoveryRequest) + if node := req.GetNode(); node != nil { + return fmt.Errorf("Node proto received in DiscoveryRequest message is %v, want empty node proto", node) + } + return nil +} + +// readDiscoveryResponseAndCheckForNonEmptyNodeProto reads a discovery request +// message out of the provided reqCh. It returns an error if it fails to read a +// message before the context deadline expires, or if the read message contains +// an empty node proto. 
+func readDiscoveryResponseAndCheckForNonEmptyNodeProto(ctx context.Context, reqCh *testutils.Channel) error { + v, err := reqCh.Receive(ctx) + if err != nil { + return fmt.Errorf("Timeout when waiting for a DiscoveryRequest message") + } + req := v.(*fakeserver.Request).Req.(*v3discoverypb.DiscoveryRequest) + if node := req.GetNode(); node == nil { + return fmt.Errorf("Empty node proto received in DiscoveryRequest message, want non-empty node proto") + } + return nil +} diff --git a/xds/internal/xdsclient/tests/rds_watchers_test.go b/xds/internal/xdsclient/tests/rds_watchers_test.go new file mode 100644 index 000000000000..b03b9ce259bc --- /dev/null +++ b/xds/internal/xdsclient/tests/rds_watchers_test.go @@ -0,0 +1,857 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsclient_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +// badRouteConfigResource returns a RouteConfiguration resource for the given +// routeName which contains a retry config with num_retries set to `0`. This is +// expected to be NACK'ed by the xDS client. +func badRouteConfigResource(routeName, ldsTarget, clusterName string) *v3routepb.RouteConfiguration { + return &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }}}}, + RetryPolicy: &v3routepb.RetryPolicy{ + NumRetries: &wrapperspb.UInt32Value{Value: 0}, + }, + }}, + } +} + +// xdsClient is expected to produce an error containing this string when an +// update is received containing a route configuration resource created using +// `badRouteConfigResource`. 
+const wantRouteConfigNACKErr = "received route is invalid: retry_policy.num_retries = 0; must be >= 1"
+
+// verifyRouteConfigUpdate waits for an update to be received on the provided
+// update channel and verifies that it matches the expected update.
+//
+// Returns an error if no update is received before the context deadline expires
+// or the received update does not match the expected one.
+func verifyRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate xdsresource.RouteConfigUpdateErrTuple) error {
+	u, err := updateCh.Receive(ctx)
+	if err != nil {
+		return fmt.Errorf("timeout when waiting for a route configuration resource from the management server: %v", err)
+	}
+	got := u.(xdsresource.RouteConfigUpdateErrTuple)
+	if wantUpdate.Err != nil {
+		if gotType, wantType := xdsresource.ErrType(got.Err), xdsresource.ErrType(wantUpdate.Err); gotType != wantType {
+			return fmt.Errorf("received update with error type %v, want %v", gotType, wantType)
+		}
+	}
+	cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.RouteConfigUpdate{}, "Raw")}
+	if diff := cmp.Diff(wantUpdate.Update, got.Update, cmpOpts...); diff != "" {
+		return fmt.Errorf("received unexpected diff in the route configuration resource update: (-want, got):\n%s", diff)
+	}
+	return nil
+}
+
+// verifyNoRouteConfigUpdate verifies that no route configuration update is
+// received on the provided update channel, and returns an error if an update is
+// received.
+//
+// A very short deadline is used while waiting for the update, as this function
+// is intended to be used when an update is not expected.
+func verifyNoRouteConfigUpdate(ctx context.Context, updateCh *testutils.Channel) error { + sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded { + return fmt.Errorf("unexpected RouteConfigUpdate: %v", u) + } + return nil +} + +// TestRDSWatch covers the case where a single watcher exists for a single route +// configuration resource. The test verifies the following scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of the watch callback. +// 2. An update from the management server containing a resource *not* being +// watched should not result in the invocation of the watch callback. +// 3. After the watch is cancelled, an update from the management server +// containing the resource that was being watched should not result in the +// invocation of the watch callback. +// +// The test is run for old and new style names. +func (s) TestRDSWatch(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3routepb.RouteConfiguration // The resource being watched. + updatedWatchedResource *v3routepb.RouteConfiguration // The watched resource after an update. + notWatchedResource *v3routepb.RouteConfiguration // A resource which is not being watched. 
+ wantUpdate xdsresource.RouteConfigUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: rdsName, + watchedResource: e2e.DefaultRouteConfig(rdsName, ldsName, cdsName), + updatedWatchedResource: e2e.DefaultRouteConfig(rdsName, ldsName, "new-cds-resource"), + notWatchedResource: e2e.DefaultRouteConfig("unsubscribed-rds-resource", ldsName, cdsName), + wantUpdate: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, + }, + }, + }, + }, + }, + }, + }, + { + desc: "new style resource", + resourceName: rdsNameNewStyle, + watchedResource: e2e.DefaultRouteConfig(rdsNameNewStyle, ldsNameNewStyle, cdsNameNewStyle), + updatedWatchedResource: e2e.DefaultRouteConfig(rdsNameNewStyle, ldsNameNewStyle, "new-cds-resource"), + notWatchedResource: e2e.DefaultRouteConfig("unsubscribed-rds-resource", ldsNameNewStyle, cdsNameNewStyle), + wantUpdate: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsNameNewStyle}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsNameNewStyle: {Weight: 100}}, + }, + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for a route configuration resource and have the + // watch callback push the received update on to a channel. + updateCh := testutils.NewChannel() + rdsCancel := client.WatchRouteConfig(test.resourceName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single route + // configuration resource, corresponding to the one being watched. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + if err := verifyRouteConfigUpdate(ctx, updateCh, test.wantUpdate); err != nil { + t.Fatal(err) + } + + // Configure the management server to return an additional route + // configuration resource, one that we are not interested in. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{test.watchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoRouteConfigUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + + // Cancel the watch and update the resource corresponding to the original + // watch. Ensure that the cancelled watch callback is not invoked. 
+ rdsCancel() + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{test.updatedWatchedResource, test.notWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoRouteConfigUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestRDSWatch_TwoWatchesForSameResourceName covers the case where two watchers +// exist for a single route configuration resource. The test verifies the +// following scenarios: +// 1. An update from the management server containing the resource being +// watched should result in the invocation of both watch callbacks. +// 2. After one of the watches is cancelled, a redundant update from the +// management server should not result in the invocation of either of the +// watch callbacks. +// 3. An update from the management server containing the resource being +// watched should result in the invocation of the un-cancelled watch +// callback. +// +// The test is run for old and new style names. +func (s) TestRDSWatch_TwoWatchesForSameResourceName(t *testing.T) { + tests := []struct { + desc string + resourceName string + watchedResource *v3routepb.RouteConfiguration // The resource being watched. + updatedWatchedResource *v3routepb.RouteConfiguration // The watched resource after an update. 
+ wantUpdateV1 xdsresource.RouteConfigUpdateErrTuple + wantUpdateV2 xdsresource.RouteConfigUpdateErrTuple + }{ + { + desc: "old style resource", + resourceName: rdsName, + watchedResource: e2e.DefaultRouteConfig(rdsName, ldsName, cdsName), + updatedWatchedResource: e2e.DefaultRouteConfig(rdsName, ldsName, "new-cds-resource"), + wantUpdateV1: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, + }, + }, + }, + }, + }, + }, + wantUpdateV2: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{"new-cds-resource": {Weight: 100}}, + }, + }, + }, + }, + }, + }, + }, + { + desc: "new style resource", + resourceName: rdsNameNewStyle, + watchedResource: e2e.DefaultRouteConfig(rdsNameNewStyle, ldsNameNewStyle, cdsNameNewStyle), + updatedWatchedResource: e2e.DefaultRouteConfig(rdsNameNewStyle, ldsNameNewStyle, "new-cds-resource"), + wantUpdateV1: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsNameNewStyle}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsNameNewStyle: {Weight: 100}}, + }, + }, + }, + }, + }, + }, + wantUpdateV2: xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsNameNewStyle}, + 
Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{"new-cds-resource": {Weight: 100}}, + }, + }, + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for the same route configuration resource + // and have the callbacks push the received updates on to a channel. + updateCh1 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(test.resourceName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel1() + updateCh2 := testutils.NewChannel() + rdsCancel2 := client.WatchRouteConfig(test.resourceName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + + // Configure the management server to return a single route + // configuration resource, corresponding to the one being watched. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{test.watchedResource}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. 
+ if err := verifyRouteConfigUpdate(ctx, updateCh1, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh2, test.wantUpdateV1); err != nil { + t.Fatal(err) + } + + // Cancel the second watch and force the management server to push a + // redundant update for the resource being watched. Neither of the + // two watch callbacks should be invoked. + rdsCancel2() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyNoRouteConfigUpdate(ctx, updateCh1); err != nil { + t.Fatal(err) + } + if err := verifyNoRouteConfigUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + + // Update to the resource being watched. The un-cancelled callback + // should be invoked while the cancelled one should not be. + resources = e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{test.updatedWatchedResource}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh1, test.wantUpdateV2); err != nil { + t.Fatal(err) + } + if err := verifyNoRouteConfigUpdate(ctx, updateCh2); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestRDSWatch_ThreeWatchesForDifferentResourceNames covers the case with three +// watchers (two watchers for one resource, and the third watcher for another +// resource), exist across two route configuration resources. The test verifies +// that an update from the management server containing both resources results +// in the invocation of all watch callbacks. +// +// The test is run with both old and new style names. 
+func (s) TestRDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for the same route configuration resource + // and have the callbacks push the received updates on to a channel. + updateCh1 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel1() + updateCh2 := testutils.NewChannel() + rdsCancel2 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel2() + + // Register the third watch for a different route configuration resource. + updateCh3 := testutils.NewChannel() + rdsCancel3 := client.WatchRouteConfig(rdsNameNewStyle, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh3.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel3() + + // Configure the management server to return two route configuration + // resources, corresponding to the registered watches. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{ + e2e.DefaultRouteConfig(rdsName, ldsName, cdsName), + e2e.DefaultRouteConfig(rdsNameNewStyle, ldsName, cdsName), + }, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update for the all watchers. The two + // resources returned differ only in the resource name. Therefore the + // expected update is the same for all the watchers. + wantUpdate := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, + }, + }, + }, + }, + }, + } + if err := verifyRouteConfigUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + if err := verifyRouteConfigUpdate(ctx, updateCh3, wantUpdate); err != nil { + t.Fatal(err) + } +} + +// TestRDSWatch_ResourceCaching covers the case where a watch is registered for +// a resource which is already present in the cache. The test verifies that the +// watch callback is invoked with the contents from the cache, instead of a +// request being sent to the management server. 
+func (s) TestRDSWatch_ResourceCaching(t *testing.T) { + overrideFedEnvVar(t) + firstRequestReceived := false + firstAckReceived := grpcsync.NewEvent() + secondRequestReceived := grpcsync.NewEvent() + + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{ + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + // The first request has an empty version string. + if !firstRequestReceived && req.GetVersionInfo() == "" { + firstRequestReceived = true + return nil + } + // The first ack has a non-empty version string. + if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" { + firstAckReceived.Fire() + return nil + } + // Any requests after the first request and ack, are not expected. + secondRequestReceived.Fire() + return nil + }, + }) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for a route configuration resource and have the watch + // callback push the received update on to a channel. + updateCh1 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel1() + + // Configure the management server to return a single route configuration + // resource, corresponding to the one we registered a watch for. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, cdsName)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, + }, + }, + }, + }, + }, + } + if err := verifyRouteConfigUpdate(ctx, updateCh1, wantUpdate); err != nil { + t.Fatal(err) + } + select { + case <-ctx.Done(): + t.Fatal("timeout when waiting for receipt of ACK at the management server") + case <-firstAckReceived.Done(): + } + + // Register another watch for the same resource. This should get the update + // from the cache. + updateCh2 := testutils.NewChannel() + rdsCancel2 := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel2() + if err := verifyRouteConfigUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } + // No request should get sent out as part of this watch. 
+ sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout) + defer sCancel() + select { + case <-sCtx.Done(): + case <-secondRequestReceived.Done(): + t.Fatal("xdsClient sent out request instead of using update from cache") + } +} + +// TestRDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client +// does not receive an RDS response for the request that it sends. The test +// verifies that the watch callback is invoked with an error once the +// watchExpiryTimer fires. +func (s) TestRDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create an xDS client talking to a non-existent management server. + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Register a watch for a resource which is expected to fail with an error + // after the watch expiry timer fires. + updateCh := testutils.NewChannel() + rdsCancel := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel() + + // Wait for the watch expiry timer to fire. + <-time.After(defaultTestWatchExpiryTimeout) + + // Verify that an empty update with the expected error is received. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "") + if err := verifyRouteConfigUpdate(ctx, updateCh, xdsresource.RouteConfigUpdateErrTuple{Err: wantErr}); err != nil { + t.Fatal(err) + } +} + +// TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the +// client receives a valid RDS response for the request that it sends. The test +// verifies that the behavior associated with the expiry timer (i.e, callback +// invocation with error) does not take place. +func (s) TestRDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{}) + if err != nil { + t.Fatalf("Failed to spin up the xDS management server: %v", err) + } + defer mgmtServer.Stop() + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + + // Register a watch for a route configuration resource and have the watch + // callback push the received update on to a channel. + updateCh := testutils.NewChannel() + rdsCancel := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh.Send(xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel() + + // Configure the management server to return a single route configuration + // resource, corresponding to the one we registered a watch for. 
+ resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{e2e.DefaultRouteConfig(rdsName, ldsName, cdsName)}, + SkipValidation: true, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify the contents of the received update. + wantUpdate := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, + }, + }, + }, + }, + }, + } + if err := verifyRouteConfigUpdate(ctx, updateCh, wantUpdate); err != nil { + t.Fatal(err) + } + + // Wait for the watch expiry timer to fire, and verify that the callback is + // not invoked. + <-time.After(defaultTestWatchExpiryTimeout) + if err := verifyNoRouteConfigUpdate(ctx, updateCh); err != nil { + t.Fatal(err) + } +} + +// TestRDSWatch_NACKError covers the case where an update from the management +// server is NACK'ed by the xdsclient. The test verifies that the error is +// propagated to the watcher. +func (s) TestRDSWatch_NACKError(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. + client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register a watch for a route configuration resource and have the watch + // callback push the received update on to a channel. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + updateCh := testutils.NewChannel() + rdsCancel := client.WatchRouteConfig(rdsName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh.SendContext(ctx, xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel() + + // Configure the management server to return a single route configuration + // resource which is expected to be NACKed by the client. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{badRouteConfigResource(rdsName, ldsName, cdsName)}, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher. + u, err := updateCh.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a route configuration resource from the management server: %v", err) + } + gotErr := u.(xdsresource.RouteConfigUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantRouteConfigNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantRouteConfigNACKErr) + } +} + +// TestRDSWatch_PartialValid covers the case where a response from the +// management server contains both valid and invalid resources and is expected +// to be NACK'ed by the xdsclient. The test verifies that watchers corresponding +// to the valid resource receive the update, while watchers corresponding to the +// invalid resource receive an error. +func (s) TestRDSWatch_PartialValid(t *testing.T) { + overrideFedEnvVar(t) + mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{}) + defer cleanup() + + // Create an xDS client with the above bootstrap contents. 
+ client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents) + if err != nil { + t.Fatalf("Failed to create xDS client: %v", err) + } + defer close() + + // Register two watches for route configuration resources. The first watch + // is expected to receive an error because the received resource is NACKed. + // The second watch is expected to get a good update. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + badResourceName := rdsName + updateCh1 := testutils.NewChannel() + rdsCancel1 := client.WatchRouteConfig(badResourceName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh1.SendContext(ctx, xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel1() + goodResourceName := rdsNameNewStyle + updateCh2 := testutils.NewChannel() + rdsCancel2 := client.WatchRouteConfig(goodResourceName, func(u xdsresource.RouteConfigUpdate, err error) { + updateCh2.SendContext(ctx, xdsresource.RouteConfigUpdateErrTuple{Update: u, Err: err}) + }) + defer rdsCancel2() + + // Configure the management server to return two route configuration + // resources, corresponding to the registered watches. + resources := e2e.UpdateOptions{ + NodeID: nodeID, + Routes: []*v3routepb.RouteConfiguration{ + badRouteConfigResource(badResourceName, ldsName, cdsName), + e2e.DefaultRouteConfig(goodResourceName, ldsName, cdsName), + }, + SkipValidation: true, + } + if err := mgmtServer.Update(ctx, resources); err != nil { + t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err) + } + + // Verify that the expected error is propagated to the watcher which + // requested for the bad resource. 
+ u, err := updateCh1.Receive(ctx) + if err != nil { + t.Fatalf("timeout when waiting for a route configuration resource from the management server: %v", err) + } + gotErr := u.(xdsresource.RouteConfigUpdateErrTuple).Err + if gotErr == nil || !strings.Contains(gotErr.Error(), wantRouteConfigNACKErr) { + t.Fatalf("update received with error: %v, want %q", gotErr, wantRouteConfigNACKErr) + } + + // Verify that the watcher watching the good resource receives a good + // update. + wantUpdate := xdsresource.RouteConfigUpdateErrTuple{ + Update: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{ldsName}, + Routes: []*xdsresource.Route{ + { + Prefix: newStringP("/"), + ActionType: xdsresource.RouteActionRoute, + WeightedClusters: map[string]xdsresource.WeightedCluster{cdsName: {Weight: 100}}, + }, + }, + }, + }, + }, + } + if err := verifyRouteConfigUpdate(ctx, updateCh2, wantUpdate); err != nil { + t.Fatal(err) + } +} diff --git a/xds/internal/xdsclient/tests/resource_update_test.go b/xds/internal/xdsclient/tests/resource_update_test.go new file mode 100644 index 000000000000..a0b326186e88 --- /dev/null +++ b/xds/internal/xdsclient/tests/resource_update_test.go @@ -0,0 +1,1131 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package xdsclient_test + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" + "google.golang.org/grpc/xds/internal" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. +) + +// startFakeManagementServer starts a fake xDS management server and returns a +// cleanup function to close the fake server. 
+func startFakeManagementServer(t *testing.T) (*fakeserver.Server, func()) { + t.Helper() + fs, sCleanup, err := fakeserver.StartServer(nil) + if err != nil { + t.Fatalf("Failed to start fake xDS server: %v", err) + } + return fs, sCleanup +} + +func compareUpdateMetadata(ctx context.Context, dumpFunc func() map[string]xdsresource.UpdateWithMD, want map[string]xdsresource.UpdateWithMD) error { + var lastErr error + for ; ctx.Err() == nil; <-time.After(100 * time.Millisecond) { + cmpOpts := cmp.Options{ + cmpopts.EquateEmpty(), + cmp.Comparer(func(a, b time.Time) bool { return true }), + cmpopts.EquateErrors(), + protocmp.Transform(), + } + gotUpdateMetadata := dumpFunc() + diff := cmp.Diff(want, gotUpdateMetadata, cmpOpts) + if diff == "" { + return nil + } + lastErr = fmt.Errorf("unexpected diff in metadata, diff (-want +got):\n%s\n want: %+v\n got: %+v", diff, want, gotUpdateMetadata) + } + return fmt.Errorf("timeout when waiting for expected update metadata: %v", lastErr) +} + +// TestHandleListenerResponseFromManagementServer covers different scenarios +// involving receipt of an LDS response from the management server. The test +// verifies that the internal state of the xDS client (parsed resource and +// metadata) matches expectations. 
+func (s) TestHandleListenerResponseFromManagementServer(t *testing.T) { + const ( + resourceName1 = "resource-name-1" + resourceName2 = "resource-name-2" + ) + var ( + emptyRouterFilter = e2e.RouterHTTPFilter + apiListener = &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }) + }(), + } + resource1 = &v3listenerpb.Listener{ + Name: resourceName1, + ApiListener: apiListener, + } + resource2 = &v3listenerpb.Listener{ + Name: resourceName2, + ApiListener: apiListener, + } + ) + + tests := []struct { + desc string + resourceName string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantUpdate xdsresource.ListenerUpdate + wantErr string + wantUpdateMetadata map[string]xdsresource.UpdateWithMD + }{ + { + desc: "badly-marshaled-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Value: []byte{1, 2, 3, 4}, + }}, + }, + wantErr: "Listener not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "empty-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + }, + wantErr: "Listener not found in received response", + wantUpdateMetadata: 
map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "unexpected-type-in-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3routepb.RouteConfiguration{})}, + }, + wantErr: "Listener not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "one-bad-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{ + Name: resourceName1, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }}), + }, + }, + wantErr: "no RouteSpecifier", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "1", + Err: cmpopts.AnyError, + }, + }}, + }, + }, + { + desc: "one-good-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + wantUpdate: xdsresource.ListenerUpdate{ + RouteConfigName: "route-configuration-name", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, 
Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + { + desc: "two-resources-when-we-requested-one", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantUpdate: xdsresource.ListenerUpdate{ + RouteConfigName: "route-configuration-name", + HTTPFilters: []xdsresource.HTTPFilter{{Name: "router"}}, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Create a fake xDS management server listening on a local port, + // and set it up with the response to send. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + t.Logf("Created xDS client to %s", mgmtServer.Address) + + // A wrapper struct to wrap the update and the associated error, as + // received by the resource watch callback. + type updateAndErr struct { + update xdsresource.ListenerUpdate + err error + } + updateAndErrCh := testutils.NewChannel() + + // Register a watch, and push the results on to a channel. 
+ client.WatchListener(test.resourceName, func(update xdsresource.ListenerUpdate, err error) { + updateAndErrCh.Send(updateAndErr{update: update, err: err}) + }) + t.Logf("Registered a watch for Listener %q", test.resourceName) + + // Wait for the discovery request to be sent out. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := mgmtServer.XDSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for discovery request at the management server: %v", ctx) + } + wantReq := &fakeserver.Request{Req: &v3discoverypb.DiscoveryRequest{ + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{test.resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + }} + gotReq := val.(*fakeserver.Request) + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Discovery request received at management server is %+v, want %+v", gotReq, wantReq) + } + t.Logf("Discovery request received at management server") + + // Configure the fake management server with a response. + mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Wait for an update from the xDS client and compare with expected + // update. 
+ val, err = updateAndErrCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for watch callback to invoked after response from management server: %v", err) + } + gotUpdate := val.(updateAndErr).update + gotErr := val.(updateAndErr).err + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + cmpOpts := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.IgnoreFields(xdsresource.HTTPFilter{}, "Filter", "Config"), + cmpopts.IgnoreFields(xdsresource.ListenerUpdate{}, "Raw"), + } + if diff := cmp.Diff(test.wantUpdate, gotUpdate, cmpOpts...); diff != "" { + t.Fatalf("Unexpected diff in metadata, diff (-want +got):\n%s", diff) + } + if err := compareUpdateMetadata(ctx, func() map[string]xdsresource.UpdateWithMD { + dump := client.DumpResources() + return dump["type.googleapis.com/envoy.config.listener.v3.Listener"] + }, test.wantUpdateMetadata); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestHandleRouteConfigResponseFromManagementServer covers different scenarios +// involving receipt of an RDS response from the management server. The test +// verifies that the internal state of the xDS client (parsed resource and +// metadata) matches expectations. 
+func (s) TestHandleRouteConfigResponseFromManagementServer(t *testing.T) { + const ( + resourceName1 = "resource-name-1" + resourceName2 = "resource-name-2" + ) + var ( + virtualHosts = []*v3routepb.VirtualHost{ + { + Domains: []string{"lds-target-name"}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: "cluster-name"}, + }, + }, + }, + }, + }, + } + resource1 = &v3routepb.RouteConfiguration{ + Name: resourceName1, + VirtualHosts: virtualHosts, + } + resource2 = &v3routepb.RouteConfiguration{ + Name: resourceName2, + VirtualHosts: virtualHosts, + } + ) + + tests := []struct { + desc string + resourceName string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantUpdate xdsresource.RouteConfigUpdate + wantErr string + wantUpdateMetadata map[string]xdsresource.UpdateWithMD + }{ + // The first three tests involve scenarios where the response fails + // protobuf deserialization (because it contains an invalid data or type + // in the anypb.Any) or the requested resource is not present in the + // response. In either case, no resource update makes its way to the + // top-level xDS client. An RDS response without a requested resource + // does not mean that the resource does not exist in the server. It + // could be part of a future update. Therefore, the only failure mode + // for this resource is for the watch to timeout. 
+ { + desc: "badly-marshaled-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + Value: []byte{1, 2, 3, 4}, + }}, + }, + wantErr: "RouteConfiguration not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "empty-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + }, + wantErr: "RouteConfiguration not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "unexpected-type-in-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3clusterpb.Cluster{})}, + }, + wantErr: "RouteConfiguration not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "one-bad-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3routepb.RouteConfiguration{ + Name: resourceName1, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: 
[]string{"lds-resource-name"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: "cluster-resource-name"}, + }}}}, + RetryPolicy: &v3routepb.RetryPolicy{ + NumRetries: &wrapperspb.UInt32Value{Value: 0}, + }, + }}, + })}, + }, + wantErr: "received route is invalid: retry_policy.num_retries = 0; must be >= 1", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "1", + Err: cmpopts.AnyError, + }, + }}, + }, + }, + { + desc: "one-good-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + wantUpdate: xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{"lds-target-name"}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), + WeightedClusters: map[string]xdsresource.WeightedCluster{"cluster-name": {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}}, + }, + }, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + { + desc: "two-resources-when-we-requested-one", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantUpdate: 
xdsresource.RouteConfigUpdate{ + VirtualHosts: []*xdsresource.VirtualHost{ + { + Domains: []string{"lds-target-name"}, + Routes: []*xdsresource.Route{{Prefix: newStringP(""), + WeightedClusters: map[string]xdsresource.WeightedCluster{"cluster-name": {Weight: 1}}, + ActionType: xdsresource.RouteActionRoute}}, + }, + }, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + } + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Create a fake xDS management server listening on a local port, + // and set it up with the response to send. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + t.Logf("Created xDS client to %s", mgmtServer.Address) + + // A wrapper struct to wrap the update and the associated error, as + // received by the resource watch callback. + type updateAndErr struct { + update xdsresource.RouteConfigUpdate + err error + } + updateAndErrCh := testutils.NewChannel() + + // Register a watch, and push the results on to a channel. + client.WatchRouteConfig(test.resourceName, func(update xdsresource.RouteConfigUpdate, err error) { + updateAndErrCh.Send(updateAndErr{update: update, err: err}) + }) + t.Logf("Registered a watch for Route Configuration %q", test.resourceName) + + // Wait for the discovery request to be sent out. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := mgmtServer.XDSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for discovery request at the management server: %v", ctx) + } + wantReq := &fakeserver.Request{Req: &v3discoverypb.DiscoveryRequest{ + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{test.resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + }} + gotReq := val.(*fakeserver.Request) + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Discovery request received at management server is %+v, want %+v", gotReq, wantReq) + } + t.Logf("Discovery request received at management server") + + // Configure the fake management server with a response. + mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Wait for an update from the xDS client and compare with expected + // update. 
+ val, err = updateAndErrCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for watch callback to invoked after response from management server: %v", err) + } + gotUpdate := val.(updateAndErr).update + gotErr := val.(updateAndErr).err + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + cmpOpts := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.IgnoreFields(xdsresource.RouteConfigUpdate{}, "Raw"), + } + if diff := cmp.Diff(test.wantUpdate, gotUpdate, cmpOpts...); diff != "" { + t.Fatalf("Unexpected diff in metadata, diff (-want +got):\n%s", diff) + } + if err := compareUpdateMetadata(ctx, func() map[string]xdsresource.UpdateWithMD { + dump := client.DumpResources() + return dump["type.googleapis.com/envoy.config.route.v3.RouteConfiguration"] + }, test.wantUpdateMetadata); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestHandleClusterResponseFromManagementServer covers different scenarios +// involving receipt of a CDS response from the management server. The test +// verifies that the internal state of the xDS client (parsed resource and +// metadata) matches expectations. 
+func (s) TestHandleClusterResponseFromManagementServer(t *testing.T) { + const ( + resourceName1 = "resource-name-1" + resourceName2 = "resource-name-2" + ) + resource1 := &v3clusterpb.Cluster{ + Name: resourceName1, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: "eds-service-name", + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + } + resource2 := proto.Clone(resource1).(*v3clusterpb.Cluster) + resource2.Name = resourceName2 + + tests := []struct { + desc string + resourceName string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantUpdate xdsresource.ClusterUpdate + wantErr string + wantUpdateMetadata map[string]xdsresource.UpdateWithMD + }{ + { + desc: "badly-marshaled-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + Resources: []*anypb.Any{{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + Value: []byte{1, 2, 3, 4}, + }}, + }, + wantErr: "Cluster not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "empty-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + }, + wantErr: "Cluster not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: 
xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "unexpected-type-in-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3endpointpb.ClusterLoadAssignment{})}, + }, + wantErr: "Cluster not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "one-bad-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: resourceName1, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: "eds-service-name", + }, + LbPolicy: v3clusterpb.Cluster_MAGLEV, + })}, + }, + wantErr: "unexpected lbPolicy MAGLEV", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "1", + Err: cmpopts.AnyError, + }, + }}, + }, + }, + { + desc: "one-good-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: "resource-name-1", + EDSServiceName: "eds-service-name", + 
LRSServerConfig: xdsresource.ClusterLRSServerSelf, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + { + desc: "two-resources-when-we-requested-one", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: "resource-name-1", + EDSServiceName: "eds-service-name", + LRSServerConfig: xdsresource.ClusterLRSServerSelf, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Create a fake xDS management server listening on a local port, + // and set it up with the response to send. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Create an xDS client talking to the above management server. + nodeID := uuid.New().String() + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + t.Logf("Created xDS client to %s", mgmtServer.Address) + + // A wrapper struct to wrap the update and the associated error, as + // received by the resource watch callback. 
+ type updateAndErr struct { + update xdsresource.ClusterUpdate + err error + } + updateAndErrCh := testutils.NewChannel() + + // Register a watch, and push the results on to a channel. + client.WatchCluster(test.resourceName, func(update xdsresource.ClusterUpdate, err error) { + updateAndErrCh.Send(updateAndErr{update: update, err: err}) + }) + t.Logf("Registered a watch for Cluster %q", test.resourceName) + + // Wait for the discovery request to be sent out. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := mgmtServer.XDSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for discovery request at the management server: %v", ctx) + } + wantReq := &fakeserver.Request{Req: &v3discoverypb.DiscoveryRequest{ + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{test.resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.cluster.v3.Cluster", + }} + gotReq := val.(*fakeserver.Request) + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Discovery request received at management server is %+v, want %+v", gotReq, wantReq) + } + t.Logf("Discovery request received at management server") + + // Configure the fake management server with a response. + mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Wait for an update from the xDS client and compare with expected + // update. 
+ val, err = updateAndErrCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for watch callback to invoked after response from management server: %v", err) + } + gotUpdate := val.(updateAndErr).update + gotErr := val.(updateAndErr).err + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + cmpOpts := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicy"), + } + if diff := cmp.Diff(test.wantUpdate, gotUpdate, cmpOpts...); diff != "" { + t.Fatalf("Unexpected diff in metadata, diff (-want +got):\n%s", diff) + } + if err := compareUpdateMetadata(ctx, func() map[string]xdsresource.UpdateWithMD { + dump := client.DumpResources() + return dump["type.googleapis.com/envoy.config.cluster.v3.Cluster"] + }, test.wantUpdateMetadata); err != nil { + t.Fatal(err) + } + }) + } +} + +// TestHandleEndpointsResponseFromManagementServer covers different scenarios +// involving receipt of a CDS response from the management server. The test +// verifies that the internal state of the xDS client (parsed resource and +// metadata) matches expectations. 
+func (s) TestHandleEndpointsResponseFromManagementServer(t *testing.T) { + const ( + resourceName1 = "resource-name-1" + resourceName2 = "resource-name-2" + ) + resource1 := &v3endpointpb.ClusterLoadAssignment{ + ClusterName: resourceName1, + Endpoints: []*v3endpointpb.LocalityLbEndpoints{ + { + Locality: &v3corepb.Locality{SubZone: "locality-1"}, + LbEndpoints: []*v3endpointpb.LbEndpoint{ + { + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: "addr1", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: uint32(314), + }, + }, + }, + }, + }, + }, + }, + }, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, + Priority: 1, + }, + { + Locality: &v3corepb.Locality{SubZone: "locality-2"}, + LbEndpoints: []*v3endpointpb.LbEndpoint{ + { + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: "addr2", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: uint32(159), + }, + }, + }, + }, + }, + }, + }, + }, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, + Priority: 0, + }, + }, + } + resource2 := proto.Clone(resource1).(*v3endpointpb.ClusterLoadAssignment) + resource2.ClusterName = resourceName2 + + tests := []struct { + desc string + resourceName string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantUpdate xdsresource.EndpointsUpdate + wantErr string + wantUpdateMetadata map[string]xdsresource.UpdateWithMD + }{ + // The first three tests involve scenarios where the response fails + // protobuf deserialization (because it contains an invalid data or type + // in the anypb.Any) or the requested resource is not present 
in the + // response. In either case, no resource update makes its way to the + // top-level xDS client. An EDS response without a requested resource + // does not mean that the resource does not exist in the server. It + // could be part of a future update. Therefore, the only failure mode + // for this resource is for the watch to timeout. + { + desc: "badly-marshaled-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + VersionInfo: "1", + Resources: []*anypb.Any{{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + Value: []byte{1, 2, 3, 4}, + }}, + }, + wantErr: "Endpoints not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "empty-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + VersionInfo: "1", + }, + wantErr: "Endpoints not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "unexpected-type-in-response", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3listenerpb.Listener{})}, + }, + wantErr: "Endpoints not found in received response", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist}}, + }, + }, + { + desc: "one-bad-resource", + resourceName: resourceName1, + 
managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(&v3endpointpb.ClusterLoadAssignment{ + ClusterName: resourceName1, + Endpoints: []*v3endpointpb.LocalityLbEndpoints{ + { + Locality: &v3corepb.Locality{SubZone: "locality-1"}, + LbEndpoints: []*v3endpointpb.LbEndpoint{ + { + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: "addr1", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: uint32(314), + }, + }, + }, + }, + }, + }, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 0}, + }, + }, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: 1}, + Priority: 1, + }, + }, + }), + }, + }, + wantErr: "EDS response contains an endpoint with zero weight", + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": {MD: xdsresource.UpdateMetadata{ + Status: xdsresource.ServiceStatusNACKed, + ErrState: &xdsresource.UpdateErrorMetadata{ + Version: "1", + Err: cmpopts.AnyError, + }, + }}, + }, + }, + { + desc: "one-good-resource", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + wantUpdate: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314", Weight: 1}}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []xdsresource.Endpoint{{Address: "addr2:159", Weight: 1}}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, + }, + }, + 
}, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + { + desc: "two-resources-when-we-requested-one", + resourceName: resourceName1, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + VersionInfo: "1", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantUpdate: xdsresource.EndpointsUpdate{ + Localities: []xdsresource.Locality{ + { + Endpoints: []xdsresource.Endpoint{{Address: "addr1:314", Weight: 1}}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []xdsresource.Endpoint{{Address: "addr2:159", Weight: 1}}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, + }, + }, + }, + wantUpdateMetadata: map[string]xdsresource.UpdateWithMD{ + "resource-name-1": { + MD: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusACKed, Version: "1"}, + Raw: testutils.MarshalAny(resource1), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Create a fake xDS management server listening on a local port, + // and set it up with the response to send. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Create an xDS client talking to the above management server. 
+ nodeID := uuid.New().String() + client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + }, defaultTestWatchExpiryTimeout, time.Duration(0)) + if err != nil { + t.Fatalf("failed to create xds client: %v", err) + } + defer close() + t.Logf("Created xDS client to %s", mgmtServer.Address) + + // Register a watch, and push the results on to a channel. + ew := newEndpointsWatcher() + edsCancel := xdsresource.WatchEndpoints(client, test.resourceName, ew) + defer edsCancel() + t.Logf("Registered a watch for Endpoint %q", test.resourceName) + + // Wait for the discovery request to be sent out. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + val, err := mgmtServer.XDSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for discovery request at the management server: %v", ctx) + } + wantReq := &fakeserver.Request{Req: &v3discoverypb.DiscoveryRequest{ + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{test.resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment", + }} + gotReq := val.(*fakeserver.Request) + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Discovery request received at management server is %+v, want %+v", gotReq, wantReq) + } + t.Logf("Discovery request received at management server") + + // Configure the fake management server with a response. + mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Wait for an update from the xDS client and compare with expected + // update. 
+ val, err = ew.updateCh.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for watch callback to invoked after response from management server: %v", err) + } + gotUpdate := val.(endpointsUpdateErrTuple).update + gotErr := val.(endpointsUpdateErrTuple).err + if (gotErr != nil) != (test.wantErr != "") { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + if gotErr != nil && !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("Got error from handling update: %v, want %v", gotErr, test.wantErr) + } + cmpOpts := []cmp.Option{ + cmpopts.EquateEmpty(), + cmpopts.IgnoreFields(xdsresource.EndpointsUpdate{}, "Raw"), + } + if diff := cmp.Diff(test.wantUpdate, gotUpdate, cmpOpts...); diff != "" { + t.Fatalf("Unexpected diff in metadata, diff (-want +got):\n%s", diff) + } + if err := compareUpdateMetadata(ctx, func() map[string]xdsresource.UpdateWithMD { + dump := client.DumpResources() + return dump["type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment"] + }, test.wantUpdateMetadata); err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/xds/internal/xdsclient/transport/loadreport.go b/xds/internal/xdsclient/transport/loadreport.go new file mode 100644 index 000000000000..89ffc4fcec66 --- /dev/null +++ b/xds/internal/xdsclient/transport/loadreport.go @@ -0,0 +1,274 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package transport + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/load" + "google.golang.org/protobuf/proto" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3lrsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" +) + +type lrsStream = v3lrsgrpc.LoadReportingService_StreamLoadStatsClient + +// ReportLoad starts reporting loads to the management server the transport is +// configured to use. +// +// It returns a Store for the user to report loads and a function to cancel the +// load reporting. +func (t *Transport) ReportLoad() (*load.Store, func()) { + t.lrsStartStream() + return t.lrsStore, grpcsync.OnceFunc(func() { t.lrsStopStream() }) +} + +// lrsStartStream starts an LRS stream to the server, if none exists. +func (t *Transport) lrsStartStream() { + t.lrsMu.Lock() + defer t.lrsMu.Unlock() + + t.lrsRefCount++ + if t.lrsRefCount != 1 { + // Return early if the stream has already been started. + return + } + + ctx, cancel := context.WithCancel(context.Background()) + t.lrsCancelStream = cancel + + // Create a new done channel everytime a new stream is created. This ensures + // that we don't close the same channel multiple times (from lrsRunner() + // goroutine) when multiple streams are created and closed. + t.lrsRunnerDoneCh = make(chan struct{}) + go t.lrsRunner(ctx) +} + +// lrsStopStream closes the LRS stream, if this is the last user of the stream. 
+func (t *Transport) lrsStopStream() { + t.lrsMu.Lock() + defer t.lrsMu.Unlock() + + t.lrsRefCount-- + if t.lrsRefCount != 0 { + // Return early if the stream has other references. + return + } + + t.lrsCancelStream() + t.logger.Infof("Stopping LRS stream") + + // Wait for the runner goroutine to exit. The done channel will be + // recreated when a new stream is created. + <-t.lrsRunnerDoneCh +} + +// lrsRunner starts an LRS stream to report load data to the management server. +// It reports load at constant intervals (as configured by the management +// server) until the context is cancelled. +func (t *Transport) lrsRunner(ctx context.Context) { + defer close(t.lrsRunnerDoneCh) + + // This feature indicates that the client supports the + // LoadStatsResponse.send_all_clusters field in the LRS response. + node := proto.Clone(t.nodeProto).(*v3corepb.Node) + node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters") + + backoffAttempt := 0 + backoffTimer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-backoffTimer.C: + case <-ctx.Done(): + backoffTimer.Stop() + return + } + + // We reset backoff state when we successfully receive at least one + // message from the server. + resetBackoff := func() bool { + // streamCtx is created and canceled in case we terminate the stream + // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring + // goroutine. 
+ streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) + if err != nil { + t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err) + return false + } + t.logger.Infof("Created LRS stream to server %q", t.serverURI) + + if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { + t.logger.Warningf("Sending first LRS request failed: %v", err) + return false + } + + clusters, interval, err := t.recvFirstLoadStatsResponse(stream) + if err != nil { + t.logger.Warningf("Reading from LRS stream failed: %v", err) + return false + } + + t.sendLoads(streamCtx, stream, clusters, interval) + return true + }() + + if resetBackoff { + backoffTimer.Reset(0) + backoffAttempt = 0 + } else { + backoffTimer.Reset(t.backoff(backoffAttempt)) + backoffAttempt++ + } + } +} + +func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterNames []string, interval time.Duration) { + tick := time.NewTicker(interval) + defer tick.Stop() + for { + select { + case <-tick.C: + case <-ctx.Done(): + return + } + if err := t.sendLoadStatsRequest(stream, t.lrsStore.Stats(clusterNames)); err != nil { + t.logger.Warningf("Writing to LRS stream failed: %v", err) + return + } + } +} + +func (t *Transport) sendFirstLoadStatsRequest(stream lrsStream, node *v3corepb.Node) error { + req := &v3lrspb.LoadStatsRequest{Node: node} + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Sending initial LoadStatsRequest: %s", pretty.ToJSON(req)) + } + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func (t *Transport) recvFirstLoadStatsResponse(stream lrsStream) ([]string, time.Duration, error) { + resp, err := stream.Recv() + if err != nil { + return nil, 0, fmt.Errorf("failed to receive first LoadStatsResponse: %v", err) + } + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Received first 
LoadStatsResponse: %s", pretty.ToJSON(resp)) + } + + interval, err := ptypes.Duration(resp.GetLoadReportingInterval()) + if err != nil { + return nil, 0, fmt.Errorf("invalid load_reporting_interval: %v", err) + } + + if resp.ReportEndpointGranularity { + // TODO(easwars): Support per endpoint loads. + return nil, 0, errors.New("lrs: endpoint loads requested, but not supported by current implementation") + } + + clusters := resp.Clusters + if resp.SendAllClusters { + // Return nil to send stats for all clusters. + clusters = nil + } + + return clusters, interval, nil +} + +func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) error { + clusterStats := make([]*v3endpointpb.ClusterStats, 0, len(loads)) + for _, sd := range loads { + droppedReqs := make([]*v3endpointpb.ClusterStats_DroppedRequests, 0, len(sd.Drops)) + for category, count := range sd.Drops { + droppedReqs = append(droppedReqs, &v3endpointpb.ClusterStats_DroppedRequests{ + Category: category, + DroppedCount: count, + }) + } + localityStats := make([]*v3endpointpb.UpstreamLocalityStats, 0, len(sd.LocalityStats)) + for l, localityData := range sd.LocalityStats { + lid, err := internal.LocalityIDFromString(l) + if err != nil { + return err + } + loadMetricStats := make([]*v3endpointpb.EndpointLoadMetricStats, 0, len(localityData.LoadStats)) + for name, loadData := range localityData.LoadStats { + loadMetricStats = append(loadMetricStats, &v3endpointpb.EndpointLoadMetricStats{ + MetricName: name, + NumRequestsFinishedWithMetric: loadData.Count, + TotalMetricValue: loadData.Sum, + }) + } + localityStats = append(localityStats, &v3endpointpb.UpstreamLocalityStats{ + Locality: &v3corepb.Locality{ + Region: lid.Region, + Zone: lid.Zone, + SubZone: lid.SubZone, + }, + TotalSuccessfulRequests: localityData.RequestStats.Succeeded, + TotalRequestsInProgress: localityData.RequestStats.InProgress, + TotalErrorRequests: localityData.RequestStats.Errored, + LoadMetricStats: loadMetricStats, + 
UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. + }) + } + + clusterStats = append(clusterStats, &v3endpointpb.ClusterStats{ + ClusterName: sd.Cluster, + ClusterServiceName: sd.Service, + UpstreamLocalityStats: localityStats, + TotalDroppedRequests: sd.TotalDrops, + DroppedRequests: droppedReqs, + LoadReportInterval: ptypes.DurationProto(sd.ReportInterval), + }) + } + + req := &v3lrspb.LoadStatsRequest{ClusterStats: clusterStats} + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("Sending LRS loads: %s", pretty.ToJSON(req)) + } + err := stream.Send(req) + if err == io.EOF { + return getStreamError(stream) + } + return err +} + +func getStreamError(stream lrsStream) error { + for { + if _, err := stream.Recv(); err != nil { + return err + } + } +} diff --git a/xds/internal/xdsclient/transport/loadreport_test.go b/xds/internal/xdsclient/transport/loadreport_test.go new file mode 100644 index 000000000000..c3cdfede5cb6 --- /dev/null +++ b/xds/internal/xdsclient/transport/loadreport_test.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package transport_test + +import ( + "context" + "testing" + "time" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" + "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/durationpb" + + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3lrspb "github.com/envoyproxy/go-control-plane/envoy/service/load_stats/v3" +) + +func (s) TestReportLoad(t *testing.T) { + // Create a fake xDS management server listening on a local port. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Create a transport to the fake management server. + nodeProto := &v3corepb.Node{Id: uuid.New().String()} + tr, err := transport.New(transport.Options{ + ServerCfg: *testutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: nodeProto, + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No ADS validation. + OnErrorHandler: func(error) {}, // No ADS stream error handling. + OnSendHandler: func(*transport.ResourceSendInfo) {}, // No ADS stream update handling. + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Ensure that a new connection is made to the management server. 
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + if _, err := mgmtServer.NewConnChan.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for a new connection to the management server: %v", err) + } + + // Call the load reporting API, and ensure that an LRS stream is created. + store1, cancelLRS1 := tr.ReportLoad() + if err != nil { + t.Fatalf("Failed to start LRS load reporting: %v", err) + } + if _, err := mgmtServer.LRSStreamOpenChan.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for LRS stream to be created: %v", err) + } + + // Push some loads on the received store. + store1.PerCluster("cluster1", "eds1").CallDropped("test") + + // Ensure the initial request is received. + req, err := mgmtServer.LRSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for initial LRS request: %v", err) + } + gotInitialReq := req.(*fakeserver.Request).Req.(*v3lrspb.LoadStatsRequest) + nodeProto.ClientFeatures = []string{"envoy.lrs.supports_send_all_clusters"} + wantInitialReq := &v3lrspb.LoadStatsRequest{Node: nodeProto} + if diff := cmp.Diff(gotInitialReq, wantInitialReq, protocmp.Transform()); diff != "" { + t.Fatalf("Unexpected diff in initial LRS request (-got, +want):\n%s", diff) + } + + // Send a response from the server with a small deadline. + mgmtServer.LRSResponseChan <- &fakeserver.Response{ + Resp: &v3lrspb.LoadStatsResponse{ + SendAllClusters: true, + LoadReportingInterval: &durationpb.Duration{Nanos: 50000000}, // 50ms + }, + } + + // Ensure that loads are seen on the server. 
+ req, err = mgmtServer.LRSRequestChan.Receive(ctx) + if err != nil { + t.Fatalf("Timeout when waiting for LRS request with loads: %v", err) + } + gotLoad := req.(*fakeserver.Request).Req.(*v3lrspb.LoadStatsRequest).ClusterStats + if l := len(gotLoad); l != 1 { + t.Fatalf("Received load for %d clusters, want 1", l) + } + // This field is set by the client to indicate the actual time elapsed since + // the last report was sent. We cannot deterministically compare this, and + // we cannot use the cmpopts.IgnoreFields() option on proto structs, since + // we already use the protocmp.Transform() which marshals the struct into + // another message. Hence setting this field to nil is the best option here. + gotLoad[0].LoadReportInterval = nil + wantLoad := &v3endpointpb.ClusterStats{ + ClusterName: "cluster1", + ClusterServiceName: "eds1", + TotalDroppedRequests: 1, + DroppedRequests: []*v3endpointpb.ClusterStats_DroppedRequests{{Category: "test", DroppedCount: 1}}, + } + if diff := cmp.Diff(wantLoad, gotLoad[0], protocmp.Transform()); diff != "" { + t.Fatalf("Unexpected diff in LRS request (-got, +want):\n%s", diff) + } + + // Make another call to the load reporting API, and ensure that a new LRS + // stream is not created. + store2, cancelLRS2 := tr.ReportLoad() + if err != nil { + t.Fatalf("Failed to start LRS load reporting: %v", err) + } + sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) + defer sCancel() + if _, err := mgmtServer.LRSStreamOpenChan.Receive(sCtx); err != context.DeadlineExceeded { + t.Fatal("New LRS stream created when expected to use an existing one") + } + + // Push more loads. + store2.PerCluster("cluster2", "eds2").CallDropped("test") + + // Ensure that loads are seen on the server. We need a loop here because + // there could have been some requests from the client in the time between + // us reading the first request and now. Those would have been queued in the + // request channel that we read out of. 
+	for {
+		if ctx.Err() != nil {
+			t.Fatalf("Timeout when waiting for new loads to be seen on the server")
+		}
+
+		req, err = mgmtServer.LRSRequestChan.Receive(ctx)
+		if err != nil {
+			continue
+		}
+		gotLoad = req.(*fakeserver.Request).Req.(*v3lrspb.LoadStatsRequest).ClusterStats
+		if l := len(gotLoad); l != 1 {
+			continue
+		}
+		gotLoad[0].LoadReportInterval = nil
+		wantLoad := &v3endpointpb.ClusterStats{
+			ClusterName:          "cluster2",
+			ClusterServiceName:   "eds2",
+			TotalDroppedRequests: 1,
+			DroppedRequests:      []*v3endpointpb.ClusterStats_DroppedRequests{{Category: "test", DroppedCount: 1}},
+		}
+		if diff := cmp.Diff(wantLoad, gotLoad[0], protocmp.Transform()); diff != "" {
+			t.Logf("Unexpected diff in LRS request (-got, +want):\n%s", diff)
+			continue
+		}
+		break
+	}
+
+	// Cancel the first load reporting call, and ensure that the stream does not
+	// close (because we have another call open).
+	cancelLRS1()
+	sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout)
+	defer sCancel()
+	if _, err := mgmtServer.LRSStreamCloseChan.Receive(sCtx); err != context.DeadlineExceeded {
+		t.Fatal("LRS stream closed when expected to stay open")
+	}
+
+	// Cancel the second load reporting call, and ensure the stream is closed.
+	cancelLRS2()
+	if _, err := mgmtServer.LRSStreamCloseChan.Receive(ctx); err != nil {
+		t.Fatal("Timeout waiting for LRS stream to close")
+	}
+
+	// Calling the load reporting API again should result in the creation of a
+	// new LRS stream. This ensures that creating and closing multiple streams
+	// works smoothly.
+ _, cancelLRS3 := tr.ReportLoad() + if err != nil { + t.Fatalf("Failed to start LRS load reporting: %v", err) + } + if _, err := mgmtServer.LRSStreamOpenChan.Receive(ctx); err != nil { + t.Fatalf("Timeout when waiting for LRS stream to be created: %v", err) + } + cancelLRS3() +} diff --git a/xds/internal/xdsclient/transport/transport.go b/xds/internal/xdsclient/transport/transport.go new file mode 100644 index 000000000000..86803588a7cc --- /dev/null +++ b/xds/internal/xdsclient/transport/transport.go @@ -0,0 +1,638 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package transport implements the xDS transport protocol functionality +// required by the xdsclient. 
+package transport
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/buffer"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
+	"google.golang.org/grpc/xds/internal/xdsclient/load"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
+	"google.golang.org/protobuf/types/known/anypb"
+
+	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+	v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
+	v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
+	statuspb "google.golang.org/genproto/googleapis/rpc/status"
+)
+
+// Any per-RPC level logs which print complete request or response messages
+// should be gated at this verbosity level. Other per-RPC level logs which print
+// terse output should be at `INFO` and verbosity 2, which corresponds to using
+// the `Debugf` method on the logger.
+const perRPCVerbosityLevel = 9
+
+type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient
+
+// Transport provides a resource-type agnostic implementation of the xDS
+// transport protocol. At this layer, resource contents are supposed to be
+// opaque blobs which should be meaningful only to the xDS data model layer
+// which is implemented by the `xdsresource` package.
+//
+// Under the hood, it owns the gRPC connection to a single management server and
+// manages the lifecycle of ADS/LRS streams. It uses the xDS v3 transport
+// protocol version.
+type Transport struct {
+	// These fields are initialized at creation time and are read-only afterwards.
+	cc              *grpc.ClientConn        // ClientConn to the management server.
+	serverURI       string                  // URI of the management server.
+	onRecvHandler   OnRecvHandlerFunc       // Resource update handler. xDS data model layer.
+	onErrorHandler  func(error)             // To report underlying stream errors.
+	onSendHandler   OnSendHandlerFunc       // To report resources requested on ADS stream.
+	lrsStore        *load.Store             // Store returned to user for pushing loads.
+	backoff         func(int) time.Duration // Backoff after stream failures.
+	nodeProto       *v3corepb.Node          // Identifies the gRPC application.
+	logger          *grpclog.PrefixLogger   // Prefix logger for transport logs.
+	adsRunnerCancel context.CancelFunc      // CancelFunc for the ADS goroutine.
+	adsRunnerDoneCh chan struct{}           // To notify exit of ADS goroutine.
+	lrsRunnerDoneCh chan struct{}           // To notify exit of LRS goroutine.
+
+	// These channels enable synchronization amongst the different goroutines
+	// spawned by the transport, and between asynchronous events resulting from
+	// receipt of responses from the management server.
+	adsStreamCh  chan adsStream    // New ADS streams are pushed here.
+	adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here.
+
+	// mu guards the following runtime state maintained by the transport.
+	mu sync.Mutex
+	// resources is map from resource type URL to the set of resource names
+	// being requested for that type. When the ADS stream is restarted, the
+	// transport requests all these resources again from the management server.
+	resources map[string]map[string]bool
+	// versions is a map from resource type URL to the most recently ACKed
+	// version for that resource. Resource versions are a property of the
+	// resource type and not the stream, and will not be reset upon stream
+	// restarts.
+	versions map[string]string
+	// nonces is a map from resource type URL to the most recently received
+	// nonce for that resource type. Nonces are a property of the ADS stream and
+	// will be reset upon stream restarts.
+ nonces map[string]string + + lrsMu sync.Mutex // Protects all LRS state. + lrsCancelStream context.CancelFunc // CancelFunc for the LRS stream. + lrsRefCount int // Reference count on the load store. +} + +// OnRecvHandlerFunc is the implementation at the xDS data model layer, which +// determines if the configuration received from the management server can be +// applied locally or not. +// +// A nil error is returned from this function when the data model layer believes +// that the received configuration is good and can be applied locally. This will +// cause the transport layer to send an ACK to the management server. A non-nil +// error is returned from this function when the data model layer believes +// otherwise, and this will cause the transport layer to send a NACK. +type OnRecvHandlerFunc func(update ResourceUpdate) error + +// OnSendHandlerFunc is the implementation at the authority, which handles state +// changes for the resource watch and stop watch timers accordingly. +type OnSendHandlerFunc func(update *ResourceSendInfo) + +// ResourceUpdate is a representation of the configuration update received from +// the management server. It only contains fields which are useful to the data +// model layer, and layers above it. +type ResourceUpdate struct { + // Resources is the list of resources received from the management server. + Resources []*anypb.Any + // URL is the resource type URL for the above resources. + URL string + // Version is the resource version, for the above resources, as specified by + // the management server. + Version string +} + +// Options specifies configuration knobs used when creating a new Transport. +type Options struct { + // ServerCfg contains all the configuration required to connect to the xDS + // management server. + ServerCfg bootstrap.ServerConfig + // OnRecvHandler is the component which makes ACK/NACK decisions based on + // the received resources. + // + // Invoked inline and implementations must not block. 
+ OnRecvHandler OnRecvHandlerFunc + // OnErrorHandler provides a way for the transport layer to report + // underlying stream errors. These can be bubbled all the way up to the user + // of the xdsClient. + // + // Invoked inline and implementations must not block. + OnErrorHandler func(error) + // OnSendHandler provides a way for the transport layer to report underlying + // resource requests sent on the stream. However, Send() on the ADS stream will + // return successfully as long as: + // 1. there is enough flow control quota to send the message. + // 2. the message is added to the send buffer. + // However, the connection may fail after the callback is invoked and before + // the message is actually sent on the wire. This is accepted. + // + // Invoked inline and implementations must not block. + OnSendHandler func(*ResourceSendInfo) + // Backoff controls the amount of time to backoff before recreating failed + // ADS streams. If unspecified, a default exponential backoff implementation + // is used. For more details, see: + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. + Backoff func(retries int) time.Duration + // Logger does logging with a prefix. + Logger *grpclog.PrefixLogger + // NodeProto contains the Node proto to be used in xDS requests. This will be + // of type *v3corepb.Node. + NodeProto *v3corepb.Node +} + +// For overriding in unit tests. +var grpcDial = grpc.Dial + +// New creates a new Transport. 
+func New(opts Options) (*Transport, error) {
+	switch {
+	case opts.ServerCfg.ServerURI == "":
+		return nil, errors.New("missing server URI when creating a new transport")
+	case opts.ServerCfg.CredsDialOption() == nil:
+		return nil, errors.New("missing credentials when creating a new transport")
+	case opts.OnRecvHandler == nil:
+		return nil, errors.New("missing OnRecv callback handler when creating a new transport")
+	case opts.OnErrorHandler == nil:
+		return nil, errors.New("missing OnError callback handler when creating a new transport")
+	case opts.OnSendHandler == nil:
+		return nil, errors.New("missing OnSend callback handler when creating a new transport")
+	}
+
+	// Dial the xDS management server with the passed in credentials.
+	dopts := []grpc.DialOption{
+		opts.ServerCfg.CredsDialOption(),
+		grpc.WithKeepaliveParams(keepalive.ClientParameters{
+			// We decided to use these sane defaults in all languages, and
+			// kicked the can down the road as far as making these configurable.
+			Time:    5 * time.Minute,
+			Timeout: 20 * time.Second,
+		}),
+	}
+	cc, err := grpcDial(opts.ServerCfg.ServerURI, dopts...)
+	if err != nil {
+		// An error from a non-blocking dial indicates something serious.
+ return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI, err) + } + + boff := opts.Backoff + if boff == nil { + boff = backoff.DefaultExponential.Backoff + } + ret := &Transport{ + cc: cc, + serverURI: opts.ServerCfg.ServerURI, + onRecvHandler: opts.OnRecvHandler, + onErrorHandler: opts.OnErrorHandler, + onSendHandler: opts.OnSendHandler, + lrsStore: load.NewStore(), + backoff: boff, + nodeProto: opts.NodeProto, + logger: opts.Logger, + + adsStreamCh: make(chan adsStream, 1), + adsRequestCh: buffer.NewUnbounded(), + resources: make(map[string]map[string]bool), + versions: make(map[string]string), + nonces: make(map[string]string), + adsRunnerDoneCh: make(chan struct{}), + } + + // This context is used for sending and receiving RPC requests and + // responses. It is also used by all the goroutines spawned by this + // Transport. Therefore, cancelling this context when the transport is + // closed will essentially cancel any pending RPCs, and cause the goroutines + // to terminate. + ctx, cancel := context.WithCancel(context.Background()) + ret.adsRunnerCancel = cancel + go ret.adsRunner(ctx) + + ret.logger.Infof("Created transport to server %q", ret.serverURI) + return ret, nil +} + +// resourceRequest wraps the resource type url and the resource names requested +// by the user of this transport. +type resourceRequest struct { + resources []string + url string +} + +// SendRequest sends out an ADS request for the provided resources of the +// specified resource type. +// +// The request is sent out asynchronously. If no valid stream exists at the time +// of processing this request, it is queued and will be sent out once a valid +// stream exists. +// +// If a successful response is received, the update handler callback provided at +// creation time is invoked. If an error is encountered, the stream error +// handler callback provided at creation time is invoked. 
+func (t *Transport) SendRequest(url string, resources []string) { + t.adsRequestCh.Put(&resourceRequest{ + url: url, + resources: resources, + }) +} + +func (t *Transport) newAggregatedDiscoveryServiceStream(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { + // The transport retries the stream with an exponential backoff whenever the + // stream breaks without ever having seen a response. + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) +} + +// ResourceSendInfo wraps the names and url of resources sent to the management +// server. This is used by the `authority` type to start/stop the watch timer +// associated with every resource in the update. +type ResourceSendInfo struct { + ResourceNames []string + URL string +} + +func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, sendNodeProto bool, resourceNames []string, resourceURL, version, nonce string, nackErr error) error { + req := &v3discoverypb.DiscoveryRequest{ + TypeUrl: resourceURL, + ResourceNames: resourceNames, + VersionInfo: version, + ResponseNonce: nonce, + } + if sendNodeProto { + req.Node = t.nodeProto + } + if nackErr != nil { + req.ErrorDetail = &statuspb.Status{ + Code: int32(codes.InvalidArgument), Message: nackErr.Error(), + } + } + if err := stream.Send(req); err != nil { + return err + } + if t.logger.V(perRPCVerbosityLevel) { + t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req)) + } else { + t.logger.Debugf("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) + } + t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames}) + return nil +} + +func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (resources []*anypb.Any, resourceURL, version, nonce string, err error) { + resp, err := stream.Recv() + if err != nil { + return nil, "", "", "", err + } + if t.logger.V(perRPCVerbosityLevel) { + 
t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) + } else { + t.logger.Debugf("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) + } + return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil +} + +// adsRunner starts an ADS stream (and backs off exponentially, if the previous +// stream failed without receiving a single reply) and runs the sender and +// receiver routines to send and receive data from the stream respectively. +func (t *Transport) adsRunner(ctx context.Context) { + defer close(t.adsRunnerDoneCh) + + go t.send(ctx) + + backoffAttempt := 0 + backoffTimer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-backoffTimer.C: + case <-ctx.Done(): + backoffTimer.Stop() + return + } + + // We reset backoff state when we successfully receive at least one + // message from the server. + resetBackoff := func() bool { + stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) + if err != nil { + t.onErrorHandler(err) + t.logger.Warningf("Creating new ADS stream failed: %v", err) + return false + } + t.logger.Infof("ADS stream created") + + select { + case <-t.adsStreamCh: + default: + } + t.adsStreamCh <- stream + return t.recv(stream) + }() + + if resetBackoff { + backoffTimer.Reset(0) + backoffAttempt = 0 + } else { + backoffTimer.Reset(t.backoff(backoffAttempt)) + backoffAttempt++ + } + } +} + +// send is a separate goroutine for sending resource requests on the ADS stream. +// +// For every new stream received on the stream channel, all existing resources +// are re-requested from the management server. +// +// For every new resource request received on the resources channel, the +// resources map is updated (this ensures that resend will pick them up when +// there are new streams) and the appropriate request is sent out. 
+func (t *Transport) send(ctx context.Context) { + var stream adsStream + // The xDS protocol only requires that we send the node proto in the first + // discovery request on every stream. Sending the node proto in every + // request message wastes CPU resources on the client and the server. + sendNodeProto := true + for { + select { + case <-ctx.Done(): + return + case stream = <-t.adsStreamCh: + // We have a new stream and we've to ensure that the node proto gets + // sent out in the first request on the stream. At this point, we + // might not have any registered watches. Setting this field to true + // here will ensure that the node proto gets sent out along with the + // discovery request when the first watch is registered. + if len(t.resources) == 0 { + sendNodeProto = true + continue + } + + if !t.sendExisting(stream) { + // Send failed, clear the current stream. Attempt to resend will + // only be made after a new stream is created. + stream = nil + continue + } + sendNodeProto = false + case u, ok := <-t.adsRequestCh.Get(): + if !ok { + // No requests will be sent after the adsRequestCh buffer is closed. + return + } + t.adsRequestCh.Load() + + var ( + resources []string + url, version, nonce string + send bool + nackErr error + ) + switch update := u.(type) { + case *resourceRequest: + resources, url, version, nonce = t.processResourceRequest(update) + case *ackRequest: + resources, url, version, nonce, send = t.processAckRequest(update, stream) + if !send { + continue + } + nackErr = update.nackErr + } + if stream == nil { + // There's no stream yet. Skip the request. This request + // will be resent to the new streams. If no stream is + // created, the watcher will timeout (same as server not + // sending response back). 
+ continue + } + if err := t.sendAggregatedDiscoveryServiceRequest(stream, sendNodeProto, resources, url, version, nonce, nackErr); err != nil { + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, version, nonce, err) + // Send failed, clear the current stream. + stream = nil + } + sendNodeProto = false + } + } +} + +// sendExisting sends out xDS requests for existing resources when recovering +// from a broken stream. +// +// We call stream.Send() here with the lock being held. It should be OK to do +// that here because the stream has just started and Send() usually returns +// quickly (once it pushes the message onto the transport layer) and is only +// ever blocked if we don't have enough flow control quota. +func (t *Transport) sendExisting(stream adsStream) bool { + t.mu.Lock() + defer t.mu.Unlock() + + // Reset only the nonces map when the stream restarts. + // + // xDS spec says the following. See section: + // https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#ack-nack-and-resource-type-instance-version + // + // Note that the version for a resource type is not a property of an + // individual xDS stream but rather a property of the resources themselves. If + // the stream becomes broken and the client creates a new stream, the client’s + // initial request on the new stream should indicate the most recent version + // seen by the client on the previous stream + t.nonces = make(map[string]string) + + // Send node proto only in the first request on the stream. 
+ sendNodeProto := true + for url, resources := range t.resources { + if err := t.sendAggregatedDiscoveryServiceRequest(stream, sendNodeProto, mapToSlice(resources), url, t.versions[url], "", nil); err != nil { + t.logger.Warningf("Sending ADS request for resources: %q, url: %q, version: %q, nonce: %q failed: %v", resources, url, t.versions[url], "", err) + return false + } + sendNodeProto = false + } + + return true +} + +// recv receives xDS responses on the provided ADS stream and branches out to +// message specific handlers. Returns true if at least one message was +// successfully received. +func (t *Transport) recv(stream adsStream) bool { + msgReceived := false + for { + resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) + if err != nil { + // Note that we do not consider it an error if the ADS stream was closed + // after having received a response on the stream. This is because there + // are legitimate reasons why the server may need to close the stream during + // normal operations, such as needing to rebalance load or the underlying + // connection hitting its max connection age limit. + // (see [gRFC A9](https://github.com/grpc/proposal/blob/master/A9-server-side-conn-mgt.md)). + if msgReceived { + err = xdsresource.NewErrorf(xdsresource.ErrTypeStreamFailedAfterRecv, err.Error()) + } + t.onErrorHandler(err) + t.logger.Warningf("ADS stream closed: %v", err) + return msgReceived + } + msgReceived = true + + err = t.onRecvHandler(ResourceUpdate{ + Resources: resources, + URL: url, + Version: rVersion, + }) + if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { + t.logger.Warningf("%v", err) + continue + } + // If the data model layer returned an error, we need to NACK the + // response in which case we need to set the version to the most + // recently accepted version of this resource type. 
+ if err != nil { + t.mu.Lock() + t.adsRequestCh.Put(&ackRequest{ + url: url, + nonce: nonce, + stream: stream, + version: t.versions[url], + nackErr: err, + }) + t.mu.Unlock() + t.logger.Warningf("Sending NACK for resource type: %q, version: %q, nonce: %q, reason: %v", url, rVersion, nonce, err) + continue + } + t.adsRequestCh.Put(&ackRequest{ + url: url, + nonce: nonce, + stream: stream, + version: rVersion, + }) + t.logger.Debugf("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) + } +} + +func mapToSlice(m map[string]bool) []string { + ret := make([]string, 0, len(m)) + for i := range m { + ret = append(ret, i) + } + return ret +} + +func sliceToMap(ss []string) map[string]bool { + ret := make(map[string]bool, len(ss)) + for _, s := range ss { + ret[s] = true + } + return ret +} + +// processResourceRequest pulls the fields needed to send out an ADS request. +// The resource type and the list of resources to request are provided by the +// user, while the version and nonce are maintained internally. +// +// The resources map, which keeps track of the resources being requested, is +// updated here. Any subsequent stream failure will re-request resources stored +// in this map. +// +// Returns the list of resources, resource type url, version and nonce. +func (t *Transport) processResourceRequest(req *resourceRequest) ([]string, string, string, string) { + t.mu.Lock() + defer t.mu.Unlock() + + resources := sliceToMap(req.resources) + t.resources[req.url] = resources + return req.resources, req.url, t.versions[req.url], t.nonces[req.url] +} + +type ackRequest struct { + url string // Resource type URL. + version string // NACK if version is an empty string. + nonce string + nackErr error // nil for ACK, non-nil for NACK. + // ACK/NACK are tagged with the stream it's for. When the stream is down, + // all the ACK/NACK for this stream will be dropped, and the version/nonce + // won't be updated. 
+ stream grpc.ClientStream +} + +// processAckRequest pulls the fields needed to send out an ADS ACK. The nonces +// and versions map is updated. +// +// Returns the list of resources, resource type url, version, nonce, and an +// indication of whether an ACK should be sent on the wire or not. +func (t *Transport) processAckRequest(ack *ackRequest, stream grpc.ClientStream) ([]string, string, string, string, bool) { + if ack.stream != stream { + // If ACK's stream isn't the current sending stream, this means the ACK + // was pushed to queue before the old stream broke, and a new stream has + // been started since. Return immediately here so we don't update the + // nonce for the new stream. + return nil, "", "", "", false + } + + t.mu.Lock() + defer t.mu.Unlock() + + // Update the nonce irrespective of whether we send the ACK request on wire. + // An up-to-date nonce is required for the next request. + nonce := ack.nonce + t.nonces[ack.url] = nonce + + s, ok := t.resources[ack.url] + if !ok || len(s) == 0 { + // We don't send the ACK request if there are no resources of this type + // in our resources map. This can be either when the server sends + // responses before any request, or the resources are removed while the + // ackRequest was in queue). If we send a request with an empty + // resource name list, the server may treat it as a wild card and send + // us everything. + return nil, "", "", "", false + } + resources := mapToSlice(s) + + // Update the versions map only when we plan to send an ACK. + if ack.nackErr == nil { + t.versions[ack.url] = ack.version + } + + return resources, ack.url, ack.version, nonce, true +} + +// Close closes the Transport and frees any associated resources. +func (t *Transport) Close() { + t.adsRunnerCancel() + <-t.adsRunnerDoneCh + t.adsRequestCh.Close() + t.cc.Close() +} + +// ChannelConnectivityStateForTesting returns the connectivity state of the gRPC +// channel to the management server. +// +// Only for testing purposes. 
+func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State { + return t.cc.GetState() +} diff --git a/xds/internal/xdsclient/transport/transport_ack_nack_test.go b/xds/internal/xdsclient/transport/transport_ack_nack_test.go new file mode 100644 index 000000000000..f887ae1de0bd --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_ack_nack_test.go @@ -0,0 +1,520 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package transport_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb 
"github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +var ( + errWantNack = errors.New("unsupported field 'use_original_dst' is present and set to true") + + // A simple update handler for listener resources which validates only the + // `use_original_dst` field. + dataModelValidator = func(update transport.ResourceUpdate) error { + for _, r := range update.Resources { + inner := &v3discoverypb.Resource{} + if err := proto.Unmarshal(r.GetValue(), inner); err != nil { + return fmt.Errorf("failed to unmarshal DiscoveryResponse: %v", err) + } + lis := &v3listenerpb.Listener{} + if err := proto.Unmarshal(r.GetValue(), lis); err != nil { + return fmt.Errorf("failed to unmarshal DiscoveryResponse: %v", err) + } + if useOrigDst := lis.GetUseOriginalDst(); useOrigDst != nil && useOrigDst.GetValue() { + return errWantNack + } + } + return nil + } +) + +// TestSimpleAckAndNack tests simple ACK and NACK scenarios. +// 1. When the data model layer likes a received response, the test verifies +// that an ACK is sent matching the version and nonce from the response. +// 2. When a subsequent response is disliked by the data model layer, the test +// verifies that a NACK is sent matching the previously ACKed version and +// current nonce from the response. +// 3. When a subsequent response is liked by the data model layer, the test +// verifies that an ACK is sent matching the version and nonce from the +// current response. +func (s) TestSimpleAckAndNack(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Create an xDS management server listening on a local port. 
Configure the + // request and response handlers to push on channels which are inspected by + // the test goroutine to verify ack version and nonce. + streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) + streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + case <-ctx.Done(): + } + return nil + }, + OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { + select { + case streamResponseCh <- resp: + case <-ctx.Done(): + } + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Configure the management server with appropriate resources. + apiListener := &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + }) + }(), + } + const resourceName = "resource name 1" + listenerResource := &v3listenerpb.Listener{ + Name: resourceName, + ApiListener: apiListener, + } + nodeID := uuid.New().String() + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource}, + SkipValidation: true, + }) + + // Create a new transport. 
+ tr, err := transport.New(transport.Options{ + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + OnRecvHandler: dataModelValidator, + OnErrorHandler: func(err error) {}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, + NodeProto: &v3corepb.Node{Id: nodeID}, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + // Verify that the initial discovery request matches expectation. + var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + VersionInfo: "", + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + ResponseNonce: "", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Capture the version and nonce from the response. + var gotResp *v3discoverypb.DiscoveryResponse + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + + // Verify that the ACK contains the appropriate version and nonce. 
+ wantReq.VersionInfo = gotResp.GetVersionInfo() + wantReq.ResponseNonce = gotResp.GetNonce() + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Update the management server's copy of the resource to include a field + // which will cause the resource to be NACKed. + badListener := proto.Clone(listenerResource).(*v3listenerpb.Listener) + badListener.UseOriginalDst = &wrapperspb.BoolValue{Value: true} + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{badListener}, + SkipValidation: true, + }) + + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + + // Verify that the NACK contains the appropriate version, nonce and error. + // We expect the version to not change as this is a NACK. + wantReq.ResponseNonce = gotResp.GetNonce() + wantReq.ErrorDetail = &statuspb.Status{ + Code: int32(codes.InvalidArgument), + Message: errWantNack.Error(), + } + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Update the management server to send a good resource again. + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource}, + SkipValidation: true, + }) + + // The envoy-go-control-plane management server keeps resending the same + // resource as long as we keep NACK'ing it. 
So, we will see the bad resource + // sent to us a few times here, before receiving the good resource. + for { + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + + // Verify that the ACK contains the appropriate version and nonce. + wantReq.VersionInfo = gotResp.GetVersionInfo() + wantReq.ResponseNonce = gotResp.GetNonce() + wantReq.ErrorDetail = nil + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)) + if diff == "" { + break + } + t.Logf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} + +// TestInvalidFirstResponse tests the case where the first response is invalid. +// The test verifies that the NACK contains an empty version string. +func (s) TestInvalidFirstResponse(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Create an xDS management server listening on a local port. Configure the + // request and response handlers to push on channels which are inspected by + // the test goroutine to verify ack version and nonce. 
+ streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) + streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + case <-ctx.Done(): + } + return nil + }, + OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { + select { + case streamResponseCh <- resp: + case <-ctx.Done(): + } + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Configure the management server with appropriate resources. + apiListener := &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + }) + }(), + } + const resourceName = "resource name 1" + listenerResource := &v3listenerpb.Listener{ + Name: resourceName, + ApiListener: apiListener, + UseOriginalDst: &wrapperspb.BoolValue{Value: true}, // This will cause the resource to be NACKed. + } + nodeID := uuid.New().String() + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource}, + SkipValidation: true, + }) + + // Create a new transport. 
+ tr, err := transport.New(transport.Options{ + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + OnRecvHandler: dataModelValidator, + OnErrorHandler: func(err error) {}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + // Verify that the initial discovery request matches expectation. + var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + var gotResp *v3discoverypb.DiscoveryResponse + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + + // NACK should contain the appropriate error, nonce, but empty version. 
+ wantReq.VersionInfo = "" + wantReq.ResponseNonce = gotResp.GetNonce() + wantReq.ErrorDetail = &statuspb.Status{ + Code: int32(codes.InvalidArgument), + Message: errWantNack.Error(), + } + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} + +// TestResourceIsNotRequestedAnymore tests the scenario where the xDS client is +// no longer interested in a resource. The following sequence of events are +// tested: +// 1. A resource is requested and a good response is received. The test verifies +// that an ACK is sent for this resource. +// 2. The previously requested resource is no longer requested. The test +// verifies that a request with no resource names is sent out. +// 3. The same resource is requested again. The test verifies that the request +// is sent with the previously ACKed version. +func (s) TestResourceIsNotRequestedAnymore(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Create an xDS management server listening on a local port. Configure the + // request and response handlers to push on channels which are inspected by + // the test goroutine to verify ack version and nonce. 
+ streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) + streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + case <-ctx.Done(): + } + return nil + }, + OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { + select { + case streamResponseCh <- resp: + case <-ctx.Done(): + } + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Configure the management server with appropriate resources. + apiListener := &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + }) + }(), + } + const resourceName = "resource name 1" + listenerResource := &v3listenerpb.Listener{ + Name: resourceName, + ApiListener: apiListener, + } + nodeID := uuid.New().String() + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource}, + SkipValidation: true, + }) + + // Create a new transport. 
+ tr, err := transport.New(transport.Options{ + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + NodeProto: &v3corepb.Node{Id: nodeID}, + OnRecvHandler: dataModelValidator, + OnErrorHandler: func(err error) {}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + // Verify that the initial discovery request matches expectation. + var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + VersionInfo: "", + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + ResponseNonce: "", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Capture the version and nonce from the response. + var gotResp *v3discoverypb.DiscoveryResponse + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + + // Verify that the ACK contains the appropriate version and nonce. 
+ wantReq.VersionInfo = gotResp.GetVersionInfo() + wantReq.ResponseNonce = gotResp.GetNonce() + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Send a discovery request with no resource names. + tr.SendRequest(version.V3ListenerURL, []string{}) + + // Verify that the discovery request matches expectation. + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq.ResourceNames = nil + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Send a discovery request for the same resource requested earlier. + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + // Verify that the discovery request contains the version from the + // previously received response. + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq.ResourceNames = []string{resourceName} + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} diff --git a/xds/internal/xdsclient/transport/transport_backoff_test.go b/xds/internal/xdsclient/transport/transport_backoff_test.go new file mode 100644 index 000000000000..db7587ca7c30 --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_backoff_test.go @@ -0,0 +1,444 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package transport_test + +import ( + "context" + "errors" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/google/uuid" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +var strSort = func(s1, s2 string) bool { return s1 < s2 } + +// TestTransport_BackoffAfterStreamFailure tests the case where the management +// server returns an error in the ADS streaming RPC. The test verifies the +// following: +// 1. Initial discovery request matches expectation. +// 2. RPC error is propagated via the stream error handler. 
+// 3. When the stream is closed, the transport backs off. +// 4. The same discovery request is sent on the newly created stream. +func (s) TestTransport_BackoffAfterStreamFailure(t *testing.T) { + // Channels used for verifying different events in the test. + streamCloseCh := make(chan struct{}, 1) // ADS stream is closed. + streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) // Discovery request is received. + backoffCh := make(chan struct{}, 1) // Transport backoff after stream failure. + streamErrCh := make(chan error, 1) // Stream error seen by the transport. + + // Create an xDS management server listening on a local port. + streamErr := errors.New("ADS stream error") + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ + // Push on a channel whenever the stream is closed. + OnStreamClosed: func(int64, *v3corepb.Node) { + select { + case streamCloseCh <- struct{}{}: + default: + } + }, + + // Return an error everytime a request is sent on the stream. This + // should cause the transport to backoff before attempting to recreate + // the stream. + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + default: + } + return streamErr + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + + // Override the backoff implementation to push on a channel that is read by + // the test goroutine. + transportBackoff := func(v int) time.Duration { + select { + case backoffCh <- struct{}{}: + default: + } + return 0 + } + + // Create a new transport. Since we are only testing backoff behavior here, + // we can pass a no-op data model layer implementation. 
+ nodeID := uuid.New().String() + tr, err := transport.New(transport.Options{ + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. + OnErrorHandler: func(err error) { + select { + case streamErrCh <- err: + default: + } + }, + OnSendHandler: func(*transport.ResourceSendInfo) {}, + Backoff: transportBackoff, + NodeProto: &v3corepb.Node{Id: nodeID}, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + const resourceName = "resource name" + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + + // Verify that the initial discovery request matches expectation. + var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + VersionInfo: "", + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + ResponseNonce: "", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Verify that the received stream error is reported to the user. + var gotErr error + select { + case gotErr = <-streamErrCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for stream error to be reported to the user") + } + if !strings.Contains(gotErr.Error(), streamErr.Error()) { + t.Fatalf("Received stream error: %v, wantErr: %v", gotErr, streamErr) + } + + // Verify that the stream is closed. 
+ select { + case <-streamCloseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for stream to be closed after an error") + } + + // Verify that the transport backs off before recreating the stream. + select { + case <-backoffCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for transport to backoff after stream failure") + } + + // Verify that the same discovery request is resent on the new stream. + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} + +// TestTransport_RetriesAfterBrokenStream tests the case where a stream breaks +// because the server goes down. The test verifies the following: +// 1. Initial discovery request matches expectation. +// 2. Good response from the server leads to an ACK with appropriate version. +// 3. Management server going down, leads to stream failure. +// 4. Once the management server comes back up, the same resources are +// re-requested, this time with an empty nonce. +func (s) TestTransport_RetriesAfterBrokenStream(t *testing.T) { + // Channels used for verifying different events in the test. + streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) // Discovery request is received. + streamResponseCh := make(chan *v3discoverypb.DiscoveryResponse, 1) // Discovery response is received. + streamErrCh := make(chan error, 1) // Stream error seen by the transport. + + // Create an xDS management server listening on a local port. 
+ l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("Failed to create a local listener for the xDS management server: %v", err) + } + lis := testutils.NewRestartableListener(l) + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ + Listener: lis, + // Push the received request on to a channel for the test goroutine to + // verify that it matches expectations. + OnStreamRequest: func(_ int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + default: + } + return nil + }, + // Push the response that the management server is about to send on to a + // channel. The test goroutine to uses this to extract the version and + // nonce, expected on subsequent requests. + OnStreamResponse: func(_ context.Context, _ int64, _ *v3discoverypb.DiscoveryRequest, resp *v3discoverypb.DiscoveryResponse) { + select { + case streamResponseCh <- resp: + default: + } + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", lis.Addr().String()) + + // Configure the management server with appropriate resources. 
+ apiListener := &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + }) + }(), + } + const resourceName1 = "resource name 1" + const resourceName2 = "resource name 2" + listenerResource1 := &v3listenerpb.Listener{ + Name: resourceName1, + ApiListener: apiListener, + } + listenerResource2 := &v3listenerpb.Listener{ + Name: resourceName2, + ApiListener: apiListener, + } + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + nodeID := uuid.New().String() + mgmtServer.Update(ctx, e2e.UpdateOptions{ + NodeID: nodeID, + Listeners: []*v3listenerpb.Listener{listenerResource1, listenerResource2}, + SkipValidation: true, + }) + + // Create a new transport. Since we are only testing backoff behavior here, + // we can pass a no-op data model layer implementation. + tr, err := transport.New(transport.Options{ + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. + OnErrorHandler: func(err error) { + select { + case streamErrCh <- err: + default: + } + }, + OnSendHandler: func(*transport.ResourceSendInfo) {}, + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + NodeProto: &v3corepb.Node{Id: nodeID}, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + tr.SendRequest(version.V3ListenerURL, []string{resourceName1, resourceName2}) + + // Verify that the initial discovery request matches expectation. 
+ var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + VersionInfo: "", + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName1, resourceName2}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + ResponseNonce: "", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Capture the version and nonce from the response. + var gotResp *v3discoverypb.DiscoveryResponse + select { + case gotResp = <-streamResponseCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery response on the stream") + } + version := gotResp.GetVersionInfo() + nonce := gotResp.GetNonce() + + // Verify that the ACK contains the appropriate version and nonce. + wantReq.VersionInfo = version + wantReq.ResponseNonce = nonce + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } + + // Bring down the management server to simulate a broken stream. + lis.Stop() + + // We don't care about the exact error here and it can vary based on which + // error gets reported first, the Recv() failure or the new stream creation + // failure. So, all we check here is whether we get an error or not. + select { + case <-streamErrCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for stream error to be reported to the user") + } + + // Bring up the connection to the management server. 
+ lis.Restart() + + // Verify that the transport creates a new stream and sends out a new + // request which contains the previously acked version, but an empty nonce. + wantReq.ResponseNonce = "" + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for the discovery request ACK on the stream") + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform(), cmpopts.SortSlices(strSort)); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} + +// TestTransport_ResourceRequestedBeforeStreamCreation tests the case where a +// resource is requested before the transport has a valid stream. Verifies that +// the transport sends out the request once it has a valid stream. +func (s) TestTransport_ResourceRequestedBeforeStreamCreation(t *testing.T) { + // Channels used for verifying different events in the test. + streamRequestCh := make(chan *v3discoverypb.DiscoveryRequest, 1) // Discovery request is received. + + // Create an xDS management server listening on a local port. + l, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("Failed to create a local listener for the xDS management server: %v", err) + } + lis := testutils.NewRestartableListener(l) + streamErr := errors.New("ADS stream error") + + mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{ + Listener: lis, + + // Return an error everytime a request is sent on the stream. This + // should cause the transport to backoff before attempting to recreate + // the stream. + OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error { + select { + case streamRequestCh <- req: + default: + } + return streamErr + }, + }) + if err != nil { + t.Fatalf("Failed to start xDS management server: %v", err) + } + defer mgmtServer.Stop() + t.Logf("Started xDS management server on %s", lis.Addr().String()) + + // Bring down the management server before creating the transport. 
This + // allows us to test the case where SendRequest() is called when there is no + // stream to the management server. + lis.Stop() + + // Create a new transport. Since we are only testing backoff behavior here, + // we can pass a no-op data model layer implementation. + nodeID := uuid.New().String() + tr, err := transport.New(transport.Options{ + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, // No data model layer validation. + OnErrorHandler: func(error) {}, // No stream error handling. + OnSendHandler: func(*transport.ResourceSendInfo) {}, // No on send handler + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + NodeProto: &v3corepb.Node{Id: nodeID}, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send a discovery request through the transport. + const resourceName = "resource name" + tr.SendRequest(version.V3ListenerURL, []string{resourceName}) + + // Wait until the transport has attempted to connect to the management + // server and has seen the connection fail. In this case, since the + // connection is down, and the transport creates streams with WaitForReady() + // set to true, stream creation will never fail (unless the context + // expires), and therefore we cannot rely on the stream error handler. + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + for ; ctx.Err() == nil; <-time.After(defaultTestShortTimeout) { + if tr.ChannelConnectivityStateForTesting() == connectivity.TransientFailure { + break + } + } + + lis.Restart() + + // Verify that the initial discovery request matches expectation. 
+ var gotReq *v3discoverypb.DiscoveryRequest + select { + case gotReq = <-streamRequestCh: + case <-ctx.Done(): + t.Fatalf("Timeout waiting for discovery request on the stream") + } + wantReq := &v3discoverypb.DiscoveryRequest{ + VersionInfo: "", + Node: &v3corepb.Node{Id: nodeID}, + ResourceNames: []string{resourceName}, + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + ResponseNonce: "", + } + if diff := cmp.Diff(gotReq, wantReq, protocmp.Transform()); diff != "" { + t.Fatalf("Unexpected diff in received discovery request, diff (-got, +want):\n%s", diff) + } +} diff --git a/xds/internal/xdsclient/transport/transport_new_test.go b/xds/internal/xdsclient/transport/transport_new_test.go new file mode 100644 index 000000000000..2f6c3148a267 --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_new_test.go @@ -0,0 +1,106 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package transport_test + +import ( + "strings" + "testing" + + "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" +) + +// TestNew covers that New() returns an error if the input *ServerConfig +// contains invalid content. 
+func (s) TestNew(t *testing.T) { + tests := []struct { + name string + opts transport.Options + wantErrStr string + }{ + { + name: "missing server URI", + opts: transport.Options{ServerCfg: bootstrap.ServerConfig{}}, + wantErrStr: "missing server URI when creating a new transport", + }, + { + name: "missing credentials", + opts: transport.Options{ServerCfg: bootstrap.ServerConfig{ServerURI: "server-address"}}, + wantErrStr: "missing credentials when creating a new transport", + }, + { + name: "missing onRecv handler", + opts: transport.Options{ + ServerCfg: *testutils.ServerConfigForAddress(t, "server-address"), + NodeProto: &v3corepb.Node{}, + }, + wantErrStr: "missing OnRecv callback handler when creating a new transport", + }, + { + name: "missing onError handler", + opts: transport.Options{ + ServerCfg: *testutils.ServerConfigForAddress(t, "server-address"), + NodeProto: &v3corepb.Node{}, + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, + OnSendHandler: func(*transport.ResourceSendInfo) {}, + }, + wantErrStr: "missing OnError callback handler when creating a new transport", + }, + + { + name: "missing onSend handler", + opts: transport.Options{ + ServerCfg: *testutils.ServerConfigForAddress(t, "server-address"), + NodeProto: &v3corepb.Node{}, + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, + OnErrorHandler: func(error) {}, + }, + wantErrStr: "missing OnSend callback handler when creating a new transport", + }, + { + name: "happy case", + opts: transport.Options{ + ServerCfg: *testutils.ServerConfigForAddress(t, "server-address"), + NodeProto: &v3corepb.Node{}, + OnRecvHandler: func(transport.ResourceUpdate) error { return nil }, + OnErrorHandler: func(error) {}, + OnSendHandler: func(*transport.ResourceSendInfo) {}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + c, err := transport.New(test.opts) + defer func() { + if c != nil { + c.Close() + } + }() + if (err != nil) != 
(test.wantErrStr != "") { + t.Fatalf("New(%+v) = %v, wantErr: %v", test.opts, err, test.wantErrStr) + } + if err != nil && !strings.Contains(err.Error(), test.wantErrStr) { + t.Fatalf("New(%+v) = %v, wantErr: %v", test.opts, err, test.wantErrStr) + } + }) + } +} diff --git a/xds/internal/xdsclient/transport/transport_resource_test.go b/xds/internal/xdsclient/transport/transport_resource_test.go new file mode 100644 index 000000000000..0824af77f4ff --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_resource_test.go @@ -0,0 +1,219 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package transport_test contains e2e style tests for the xDS transport +// implementation. It uses the envoy-go-control-plane as the management server. 
+package transport_test + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/uuid" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/fakeserver" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" + "google.golang.org/grpc/xds/internal/xdsclient/transport" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/anypb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + defaultTestTimeout = 5 * time.Second + defaultTestShortTimeout = 10 * time.Millisecond +) + +// startFakeManagementServer starts a fake xDS management server and returns a +// cleanup function to close the fake server. +func startFakeManagementServer(t *testing.T) (*fakeserver.Server, func()) { + t.Helper() + fs, sCleanup, err := fakeserver.StartServer(nil) + if err != nil { + t.Fatalf("Failed to start fake xDS server: %v", err) + } + return fs, sCleanup +} + +// resourcesWithTypeURL wraps resources and type URL received from server. +type resourcesWithTypeURL struct { + resources []*anypb.Any + url string +} + +// TestHandleResponseFromManagementServer covers different scenarios of the +// transport receiving a response from the management server. 
In all scenarios, +// the trasport is expected to pass the received responses as-is to the data +// model layer for validation and not perform any validation on its own. +func (s) TestHandleResponseFromManagementServer(t *testing.T) { + const ( + resourceName1 = "resource-name-1" + resourceName2 = "resource-name-2" + ) + var ( + badlyMarshaledResource = &anypb.Any{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Value: []byte{1, 2, 3, 4}, + } + apiListener = &v3listenerpb.ApiListener{ + ApiListener: func() *anypb.Any { + return testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-configuration-name", + }, + }, + }) + }(), + } + resource1 = &v3listenerpb.Listener{ + Name: resourceName1, + ApiListener: apiListener, + } + resource2 = &v3listenerpb.Listener{ + Name: resourceName2, + ApiListener: apiListener, + } + ) + + tests := []struct { + desc string + resourceNamesToRequest []string + managementServerResponse *v3discoverypb.DiscoveryResponse + wantURL string + wantResources []*anypb.Any + }{ + { + desc: "badly marshaled response", + resourceNamesToRequest: []string{resourceName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{badlyMarshaledResource}, + }, + wantURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + wantResources: []*anypb.Any{badlyMarshaledResource}, + }, + { + desc: "empty response", + resourceNamesToRequest: []string{resourceName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{}, + wantURL: "", + wantResources: nil, + }, + { + desc: "one good resource", + resourceNamesToRequest: []string{resourceName1}, + managementServerResponse: 
&v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + wantURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + wantResources: []*anypb.Any{testutils.MarshalAny(resource1)}, + }, + { + desc: "two good resources", + resourceNamesToRequest: []string{resourceName1, resourceName2}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + wantResources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + { + desc: "two resources when we requested one", + resourceNamesToRequest: []string{resourceName1}, + managementServerResponse: &v3discoverypb.DiscoveryResponse{ + TypeUrl: "type.googleapis.com/envoy.config.listener.v3.Listener", + Resources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + wantURL: "type.googleapis.com/envoy.config.listener.v3.Listener", + wantResources: []*anypb.Any{testutils.MarshalAny(resource1), testutils.MarshalAny(resource2)}, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + // Create a fake xDS management server listening on a local port, + // and set it up with the response to send. + mgmtServer, cleanup := startFakeManagementServer(t) + defer cleanup() + t.Logf("Started xDS management server on %s", mgmtServer.Address) + mgmtServer.XDSResponseChan <- &fakeserver.Response{Resp: test.managementServerResponse} + + // Create a new transport. + resourcesCh := testutils.NewChannel() + tr, err := transport.New(transport.Options{ + ServerCfg: *xdstestutils.ServerConfigForAddress(t, mgmtServer.Address), + // No validation. Simply push received resources on a channel. 
+ OnRecvHandler: func(update transport.ResourceUpdate) error { + resourcesCh.Send(&resourcesWithTypeURL{ + resources: update.Resources, + url: update.URL, + // Ignore resource version here. + }) + return nil + }, + OnSendHandler: func(*transport.ResourceSendInfo) {}, // No onSend handling. + OnErrorHandler: func(error) {}, // No stream error handling. + Backoff: func(int) time.Duration { return time.Duration(0) }, // No backoff. + NodeProto: &v3corepb.Node{Id: uuid.New().String()}, + }) + if err != nil { + t.Fatalf("Failed to create xDS transport: %v", err) + } + defer tr.Close() + + // Send the request, and validate that the response sent by the + // management server is propagated to the data model layer. + tr.SendRequest(version.V3ListenerURL, test.resourceNamesToRequest) + ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout) + defer cancel() + v, err := resourcesCh.Receive(ctx) + if err != nil { + t.Fatalf("Failed to receive resources at the data model layer: %v", err) + } + gotURL := v.(*resourcesWithTypeURL).url + gotResources := v.(*resourcesWithTypeURL).resources + if gotURL != test.wantURL { + t.Fatalf("Received resource URL in response: %s, want %s", gotURL, test.wantURL) + } + if diff := cmp.Diff(gotResources, test.wantResources, protocmp.Transform()); diff != "" { + t.Fatalf("Received unexpected resources. Diff (-got, +want):\n%s", diff) + } + }) + } +} diff --git a/xds/internal/xdsclient/transport/transport_test.go b/xds/internal/xdsclient/transport/transport_test.go new file mode 100644 index 000000000000..50dc74dd0ba2 --- /dev/null +++ b/xds/internal/xdsclient/transport/transport_test.go @@ -0,0 +1,83 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package transport + +import ( + "testing" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/internal/grpctest" + xdstestutils "google.golang.org/grpc/xds/internal/testutils" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func (s) TestNewWithGRPCDial(t *testing.T) { + // Override the dialer with a custom one. + customDialerCalled := false + customDialer := func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + customDialerCalled = true + return grpc.Dial(target, opts...) + } + oldDial := grpcDial + grpcDial = customDialer + defer func() { grpcDial = oldDial }() + + // Create a new transport and ensure that the custom dialer was called. + opts := Options{ + ServerCfg: *xdstestutils.ServerConfigForAddress(t, "server-address"), + NodeProto: &v3corepb.Node{}, + OnRecvHandler: func(ResourceUpdate) error { return nil }, + OnErrorHandler: func(error) {}, + OnSendHandler: func(*ResourceSendInfo) {}, + } + c, err := New(opts) + if err != nil { + t.Fatalf("New(%v) failed: %v", opts, err) + } + defer c.Close() + + if !customDialerCalled { + t.Fatalf("New(%+v) custom dialer called = false, want true", opts) + } + customDialerCalled = false + + // Reset the dialer, create a new transport and ensure that our custom + // dialer is no longer called. 
+ grpcDial = grpc.Dial + c, err = New(opts) + defer func() { + if c != nil { + c.Close() + } + }() + if err != nil { + t.Fatalf("New(%v) failed: %v", opts, err) + } + + if customDialerCalled { + t.Fatalf("New(%+v) custom dialer called = true, want false", opts) + } +} diff --git a/xds/internal/balancer/balancergroup/testutils_test.go b/xds/internal/xdsclient/xdsclient_test.go similarity index 97% rename from xds/internal/balancer/balancergroup/testutils_test.go rename to xds/internal/xdsclient/xdsclient_test.go index 1429fa87b3f2..d7bb926659f3 100644 --- a/xds/internal/balancer/balancergroup/testutils_test.go +++ b/xds/internal/xdsclient/xdsclient_test.go @@ -16,7 +16,7 @@ * */ -package balancergroup +package xdsclient_test import ( "testing" diff --git a/xds/internal/xdsclient/xdslbregistry/converter/converter.go b/xds/internal/xdsclient/xdslbregistry/converter/converter.go new file mode 100644 index 000000000000..c5d5afe4ebdc --- /dev/null +++ b/xds/internal/xdsclient/xdslbregistry/converter/converter.go @@ -0,0 +1,234 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package converter provides converters to convert proto load balancing +// configuration, defined by the xDS API spec, to JSON load balancing +// configuration. These converters are registered by proto type in a registry, +// which gets pulled from based off proto type passed in. 
+package converter + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/balancer/weightedroundrobin" + "google.golang.org/grpc/internal/envconfig" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3clientsideweightedroundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/client_side_weighted_round_robin/v3" + v3pickfirstpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + structpb "github.com/golang/protobuf/ptypes/struct" +) + +func init() { + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.client_side_weighted_round_robin.v3.ClientSideWeightedRoundRobin", convertWeightedRoundRobinProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.ring_hash.v3.RingHash", convertRingHashProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.pick_first.v3.PickFirst", convertPickFirstProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.round_robin.v3.RoundRobin", 
convertRoundRobinProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/envoy.extensions.load_balancing_policies.wrr_locality.v3.WrrLocality", convertWRRLocalityProtoToServiceConfig) + xdslbregistry.Register("type.googleapis.com/udpa.type.v1.TypedStruct", convertV1TypedStructToServiceConfig) + xdslbregistry.Register("type.googleapis.com/xds.type.v3.TypedStruct", convertV3TypedStructToServiceConfig) +} + +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M +) + +func convertRingHashProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + if !envconfig.XDSRingHash { + return nil, nil + } + rhProto := &v3ringhashpb.RingHash{} + if err := proto.Unmarshal(rawProto, rhProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + if rhProto.GetHashFunction() != v3ringhashpb.RingHash_XX_HASH { + return nil, fmt.Errorf("unsupported ring_hash hash function %v", rhProto.GetHashFunction()) + } + + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := rhProto.GetMinimumRingSize(); min != nil { + minSize = min.GetValue() + } + if max := rhProto.GetMaximumRingSize(); max != nil { + maxSize = max.GetValue() + } + + rhCfg := &ringhash.LBConfig{ + MinRingSize: minSize, + MaxRingSize: maxSize, + } + + rhCfgJSON, err := json.Marshal(rhCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", rhCfg, err) + } + return makeBalancerConfigJSON(ringhash.Name, rhCfgJSON), nil +} + +type pfConfig struct { + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func convertPickFirstProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + if !envconfig.PickFirstLBConfig { + return nil, nil + } + pfProto := &v3pickfirstpb.PickFirst{} + if err := proto.Unmarshal(rawProto, pfProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + pfCfg := &pfConfig{ShuffleAddressList: 
pfProto.GetShuffleAddressList()} + js, err := json.Marshal(pfCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", pfCfg, err) + } + return makeBalancerConfigJSON(grpc.PickFirstBalancerName, js), nil +} + +func convertRoundRobinProtoToServiceConfig([]byte, int) (json.RawMessage, error) { + return makeBalancerConfigJSON(roundrobin.Name, json.RawMessage("{}")), nil +} + +type wrrLocalityLBConfig struct { + ChildPolicy json.RawMessage `json:"childPolicy,omitempty"` +} + +func convertWRRLocalityProtoToServiceConfig(rawProto []byte, depth int) (json.RawMessage, error) { + wrrlProto := &v3wrrlocalitypb.WrrLocality{} + if err := proto.Unmarshal(rawProto, wrrlProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + epJSON, err := xdslbregistry.ConvertToServiceConfig(wrrlProto.GetEndpointPickingPolicy(), depth+1) + if err != nil { + return nil, fmt.Errorf("error converting endpoint picking policy: %v for %+v", err, wrrlProto) + } + wrrLCfg := wrrLocalityLBConfig{ + ChildPolicy: epJSON, + } + + lbCfgJSON, err := json.Marshal(wrrLCfg) + if err != nil { + return nil, fmt.Errorf("error marshaling JSON for type %T: %v", wrrLCfg, err) + } + return makeBalancerConfigJSON(wrrlocality.Name, lbCfgJSON), nil +} + +func convertWeightedRoundRobinProtoToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) { + cswrrProto := &v3clientsideweightedroundrobinpb.ClientSideWeightedRoundRobin{} + if err := proto.Unmarshal(rawProto, cswrrProto); err != nil { + return nil, fmt.Errorf("failed to unmarshal resource: %v", err) + } + wrrLBCfg := &wrrLBConfig{} + // Only set fields if specified in proto. If not set, ParseConfig of the WRR + // will populate the config with defaults. 
+	if enableOOBLoadReportCfg := cswrrProto.GetEnableOobLoadReport(); enableOOBLoadReportCfg != nil {
+		wrrLBCfg.EnableOOBLoadReport = enableOOBLoadReportCfg.GetValue()
+	}
+	if oobReportingPeriodCfg := cswrrProto.GetOobReportingPeriod(); oobReportingPeriodCfg != nil {
+		wrrLBCfg.OOBReportingPeriod = internalserviceconfig.Duration(oobReportingPeriodCfg.AsDuration())
+	}
+	if blackoutPeriodCfg := cswrrProto.GetBlackoutPeriod(); blackoutPeriodCfg != nil {
+		wrrLBCfg.BlackoutPeriod = internalserviceconfig.Duration(blackoutPeriodCfg.AsDuration())
+	}
+	// Read the weight expiration period from its own proto field; reading
+	// GetBlackoutPeriod() here would silently copy the wrong duration.
+	if weightExpirationPeriodCfg := cswrrProto.GetWeightExpirationPeriod(); weightExpirationPeriodCfg != nil {
+		wrrLBCfg.WeightExpirationPeriod = internalserviceconfig.Duration(weightExpirationPeriodCfg.AsDuration())
+	}
+	if weightUpdatePeriodCfg := cswrrProto.GetWeightUpdatePeriod(); weightUpdatePeriodCfg != nil {
+		wrrLBCfg.WeightUpdatePeriod = internalserviceconfig.Duration(weightUpdatePeriodCfg.AsDuration())
+	}
+	if errorUtilizationPenaltyCfg := cswrrProto.GetErrorUtilizationPenalty(); errorUtilizationPenaltyCfg != nil {
+		wrrLBCfg.ErrorUtilizationPenalty = float64(errorUtilizationPenaltyCfg.GetValue())
+	}
+
+	lbCfgJSON, err := json.Marshal(wrrLBCfg)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling JSON for type %T: %v", wrrLBCfg, err)
+	}
+	return makeBalancerConfigJSON(weightedroundrobin.Name, lbCfgJSON), nil
+}
+
+// convertV1TypedStructToServiceConfig unwraps a v1 TypedStruct and hands the
+// contained type URL and Struct to the custom-policy converter.
+func convertV1TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) {
+	tsProto := &v1xdsudpatypepb.TypedStruct{}
+	if err := proto.Unmarshal(rawProto, tsProto); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+	return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue())
+}
+
+// convertV3TypedStructToServiceConfig unwraps a v3 TypedStruct and hands the
+// contained type URL and Struct to the custom-policy converter.
+func convertV3TypedStructToServiceConfig(rawProto []byte, _ int) (json.RawMessage, error) {
+	tsProto := &v3xdsxdstypepb.TypedStruct{}
+	if err := proto.Unmarshal(rawProto, tsProto); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal resource: %v", err)
+	}
+ return convertCustomPolicy(tsProto.GetTypeUrl(), tsProto.GetValue()) +} + +// convertCustomPolicy attempts to prepare json configuration for a custom lb +// proto, which specifies the gRPC balancer type and configuration. Returns the +// converted json and an error which should cause caller to error if error +// converting. If both json and error returned are nil, it means the gRPC +// Balancer registry does not contain that balancer type, and the caller should +// continue to the next policy. +func convertCustomPolicy(typeURL string, s *structpb.Struct) (json.RawMessage, error) { + // The gRPC policy name will be the "type name" part of the value of the + // type_url field in the TypedStruct. We get this by using the part after + // the last / character. Can assume a valid type_url from the control plane. + pos := strings.LastIndex(typeURL, "/") + name := typeURL[pos+1:] + + if balancer.Get(name) == nil { + return nil, nil + } + + rawJSON, err := json.Marshal(s) + if err != nil { + return nil, fmt.Errorf("error converting custom lb policy %v: %v for %+v", err, typeURL, s) + } + + // The Struct contained in the TypedStruct will be returned as-is as the + // configuration JSON object. 
+ return makeBalancerConfigJSON(name, rawJSON), nil +} + +type wrrLBConfig struct { + EnableOOBLoadReport bool `json:"enableOobLoadReport,omitempty"` + OOBReportingPeriod internalserviceconfig.Duration `json:"oobReportingPeriod,omitempty"` + BlackoutPeriod internalserviceconfig.Duration `json:"blackoutPeriod,omitempty"` + WeightExpirationPeriod internalserviceconfig.Duration `json:"weightExpirationPeriod,omitempty"` + WeightUpdatePeriod internalserviceconfig.Duration `json:"weightUpdatePeriod,omitempty"` + ErrorUtilizationPenalty float64 `json:"errorUtilizationPenalty,omitempty"` +} + +func makeBalancerConfigJSON(name string, value json.RawMessage) []byte { + return []byte(fmt.Sprintf(`[{%q: %s}]`, name, value)) +} diff --git a/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go b/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go new file mode 100644 index 000000000000..0f3d1df4db20 --- /dev/null +++ b/xds/internal/xdsclient/xdslbregistry/xdslbregistry.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xdslbregistry provides a registry of converters that convert proto +// from load balancing configuration, defined by the xDS API spec, to JSON load +// balancing configuration. 
+package xdslbregistry + +import ( + "encoding/json" + "fmt" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" +) + +var ( + // m is a map from proto type to Converter. + m = make(map[string]Converter) +) + +// Register registers the converter to the map keyed on a proto type. Must be +// called at init time. Not thread safe. +func Register(protoType string, c Converter) { + m[protoType] = c +} + +// SetRegistry sets the xDS LB registry. Must be called at init time. Not thread +// safe. +func SetRegistry(registry map[string]Converter) { + m = registry +} + +// Converter converts raw proto bytes into the internal Go JSON representation +// of the proto passed. Returns the json message, and an error. If both +// returned are nil, it represents continuing to the next proto. +type Converter func([]byte, int) (json.RawMessage, error) + +// ConvertToServiceConfig converts a proto Load Balancing Policy configuration +// into a json string. Returns an error if: +// - no supported policy found +// - there is more than 16 layers of recursion in the configuration +// - a failure occurs when converting the policy +func ConvertToServiceConfig(lbPolicy *v3clusterpb.LoadBalancingPolicy, depth int) (json.RawMessage, error) { + // "Configurations that require more than 16 levels of recursion are + // considered invalid and should result in a NACK response." - A51 + if depth > 15 { + return nil, fmt.Errorf("lb policy %v exceeds max depth supported: 16 layers", lbPolicy) + } + + // "This function iterate over the list of policy messages in + // LoadBalancingPolicy, attempting to convert each one to gRPC form, + // stopping at the first supported policy." - A52 + for _, policy := range lbPolicy.GetPolicies() { + policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl() + converter := m[policy.GetTypedExtensionConfig().GetTypedConfig().GetTypeUrl()] + // "Any entry not in the above list is unsupported and will be skipped." 
+ // - A52 + // This includes Least Request as well, since grpc-go does not support + // the Least Request Load Balancing Policy. + if converter == nil { + continue + } + json, err := converter(policy.GetTypedExtensionConfig().GetTypedConfig().GetValue(), depth) + if json == nil && err == nil { + continue + } + return json, err + } + return nil, fmt.Errorf("no supported policy found in policy list +%v", lbPolicy) +} diff --git a/xds/internal/xdsclient/xdslbregistry/xdslbregistry_test.go b/xds/internal/xdsclient/xdslbregistry/xdslbregistry_test.go new file mode 100644 index 000000000000..f1ce5496b794 --- /dev/null +++ b/xds/internal/xdsclient/xdslbregistry/xdslbregistry_test.go @@ -0,0 +1,433 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package xdslbregistry_test contains test cases for the xDS LB Policy Registry. 
+package xdslbregistry_test + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + _ "google.golang.org/grpc/balancer/roundrobin" + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpctest" + "google.golang.org/grpc/internal/pretty" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" + _ "google.golang.org/grpc/xds" // Register the xDS LB Registry Converters. + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" + + v1xdsudpatypepb "github.com/cncf/xds/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" + v3pickfirstpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/pick_first/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3roundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/round_robin/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + structpb "github.com/golang/protobuf/ptypes/struct" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +func 
wrrLocalityBalancerConfig(childPolicy *internalserviceconfig.BalancerConfig) *internalserviceconfig.BalancerConfig { + return &internalserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: childPolicy, + }, + } +} + +func (s) TestConvertToServiceConfigSuccess(t *testing.T) { + const customLBPolicyName = "myorg.MyCustomLeastRequestPolicy" + stub.Register(customLBPolicyName, stub.BalancerFuncs{}) + + tests := []struct { + name string + policy *v3clusterpb.LoadBalancingPolicy + wantConfig string // JSON config + rhDisabled bool + pfDisabled bool + }{ + { + name: "ring_hash", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + wantConfig: `[{"ring_hash_experimental": { "minRingSize": 10, "maxRingSize": 100 }}]`, + }, + { + name: "pick_first_shuffle", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3pickfirstpb.PickFirst{ + ShuffleAddressList: true, + }), + }, + }, + }, + }, + wantConfig: `[{"pick_first": { "shuffleAddressList": true }}]`, + }, + { + name: "pick_first", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3pickfirstpb.PickFirst{}), + }, + }, + }, + }, + wantConfig: `[{"pick_first": { "shuffleAddressList": false }}]`, + }, + { + name: "round_robin", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: 
&v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + }, + }, + wantConfig: `[{"round_robin": {}}]`, + }, + { + name: "round_robin_ring_hash_use_first_supported", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + wantConfig: `[{"round_robin": {}}]`, + }, + { + name: "ring_hash_disabled_rh_rr_use_first_supported", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + }, + }, + wantConfig: `[{"round_robin": {}}]`, + rhDisabled: true, + }, + { + name: "pick_first_disabled_pf_rr_use_first_supported", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3pickfirstpb.PickFirst{ + ShuffleAddressList: true, + }), + }, + }, + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + }, + }, + wantConfig: `[{"round_robin": {}}]`, + pfDisabled: true, + }, + { + name: "custom_lb_type_v3_struct", + 
policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + // The type not registered in gRPC Policy registry. + // Should fallback to next policy in list. + TypedConfig: testutils.MarshalAny(&v3xdsxdstypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.ThisTypeDoesNotExist", + Value: &structpb.Struct{}, + }), + }, + }, + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3xdsxdstypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", + Value: &structpb.Struct{}, + }), + }, + }, + }, + }, + wantConfig: `[{"myorg.MyCustomLeastRequestPolicy": {}}]`, + }, + { + name: "custom_lb_type_v1_struct", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v1xdsudpatypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", + Value: &structpb.Struct{}, + }), + }, + }, + }, + }, + wantConfig: `[{"myorg.MyCustomLeastRequestPolicy": {}}]`, + }, + { + name: "wrr_locality_child_round_robin", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + }, + }, + wantConfig: `[{"xds_wrr_locality_experimental": { "childPolicy": [{"round_robin": {}}] }}]`, + }, + { + name: "wrr_locality_child_custom_lb_type_v3_struct", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(&v3xdsxdstypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", + Value: &structpb.Struct{}, + }), + }, + }, + }, + }, + wantConfig: 
`[{"xds_wrr_locality_experimental": { "childPolicy": [{"myorg.MyCustomLeastRequestPolicy": {}}] }}]`, + }, + { + name: "on-the-boundary-of-recursive-limit", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(&v3roundrobinpb.RoundRobin{}))))))))))))))), + }, + }, + }, + }, + wantConfig: jsonMarshal(t, wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(wrrLocalityBalancerConfig(&internalserviceconfig.BalancerConfig{ + Name: "round_robin", + })))))))))))))))), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.rhDisabled { + defer func(old bool) { envconfig.XDSRingHash = old }(envconfig.XDSRingHash) + envconfig.XDSRingHash = false + } + if !test.pfDisabled { + defer func(old bool) { envconfig.PickFirstLBConfig = old }(envconfig.PickFirstLBConfig) + envconfig.PickFirstLBConfig = true + } + rawJSON, err := xdslbregistry.ConvertToServiceConfig(test.policy, 0) + if err != nil { + t.Fatalf("ConvertToServiceConfig(%s) failed: %v", pretty.ToJSON(test.policy), err) + } + // got and want must be unmarshalled since JSON strings shouldn't + // generally be directly compared. 
+ var got []map[string]interface{} + if err := json.Unmarshal(rawJSON, &got); err != nil { + t.Fatalf("Error unmarshalling rawJSON (%q): %v", rawJSON, err) + } + var want []map[string]interface{} + if err := json.Unmarshal(json.RawMessage(test.wantConfig), &want); err != nil { + t.Fatalf("Error unmarshalling wantConfig (%q): %v", test.wantConfig, err) + } + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("ConvertToServiceConfig() got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} + +func jsonMarshal(t *testing.T, x interface{}) string { + t.Helper() + js, err := json.Marshal(x) + if err != nil { + t.Fatalf("Error marshalling to JSON (%+v): %v", x, err) + } + return string(js) +} + +// TestConvertToServiceConfigFailure tests failure cases of the xDS LB registry +// of converting proto configuration to JSON configuration. +func (s) TestConvertToServiceConfigFailure(t *testing.T) { + tests := []struct { + name string + policy *v3clusterpb.LoadBalancingPolicy + wantErr string + }{ + { + name: "not xx_hash function", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_MURMUR_HASH_2, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + wantErr: "unsupported ring_hash hash function", + }, + { + name: "no-supported-policy", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + // The type not registered in gRPC Policy registry. 
+ TypedConfig: testutils.MarshalAny(&v3xdsxdstypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.ThisTypeDoesNotExist", + Value: &structpb.Struct{}, + }), + }, + }, + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + // Not supported by gRPC-Go. + TypedConfig: testutils.MarshalAny(&v3leastrequestpb.LeastRequest{}), + }, + }, + }, + }, + wantErr: "no supported policy found in policy list", + }, + { + name: "exceeds-boundary-of-recursive-limit-by-1", + policy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(wrrLocality(&v3roundrobinpb.RoundRobin{})))))))))))))))), + }, + }, + }, + }, + wantErr: "exceeds max depth", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, gotErr := xdslbregistry.ConvertToServiceConfig(test.policy, 0) + // Test the error substring to test the different root causes of + // errors. This is more brittle over time, but it's important to + // test the root cause of the errors emitted from the + // ConvertToServiceConfig function call. Also, this package owns the + // error strings so breakages won't come unexpectedly. + if gotErr == nil || !strings.Contains(gotErr.Error(), test.wantErr) { + t.Fatalf("ConvertToServiceConfig() = %v, wantErr %v", gotErr, test.wantErr) + } + }) + } +} + +// wrrLocality is a helper that takes a proto message and returns a +// WrrLocalityProto with the proto message marshaled into a proto.Any as a +// child. 
+func wrrLocality(m proto.Message) *v3wrrlocalitypb.WrrLocality { + return &v3wrrlocalitypb.WrrLocality{ + EndpointPickingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(m), + }, + }, + }, + }, + } +} + +// wrrLocalityAny takes a proto message and returns a wrr locality proto +// marshaled as an any with an any child set to the marshaled proto message. +func wrrLocalityAny(m proto.Message) *anypb.Any { + return testutils.MarshalAny(wrrLocality(m)) +} diff --git a/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go new file mode 100644 index 000000000000..183801c1c68c --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -0,0 +1,153 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // ClusterResourceTypeName represents the transport agnostic name for the + // cluster resource. + ClusterResourceTypeName = "ClusterResource" +) + +var ( + // Compile time interface checks. 
+ _ Type = clusterResourceType{} + + // Singleton instantiation of the resource type implementation. + clusterType = clusterResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3ClusterURL, + typeName: ClusterResourceTypeName, + allResourcesRequiredInSotW: true, + }, + } +) + +// clusterResourceType provides the resource-type specific functionality for a +// Cluster resource. +// +// Implements the Type interface. +type clusterResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (clusterResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, cluster, err := unmarshalClusterResource(resource) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: ClusterUpdate{}}}, err + } + + // Perform extra validation here. + if err := securityConfigValidator(opts.BootstrapConfig, cluster.SecurityCfg); err != nil { + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: ClusterUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &ClusterResourceData{Resource: cluster}}, nil + +} + +// ClusterResourceData wraps the configuration of a Cluster resource as received +// from the management server. +// +// Implements the ResourceData interface. +type ClusterResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource ClusterUpdate +} + +// Equal returns true if other is equal to r. 
+func (c *ClusterResourceData) Equal(other ResourceData) bool { + if c == nil && other == nil { + return true + } + if (c == nil) != (other == nil) { + return false + } + return proto.Equal(c.Resource.Raw, other.Raw()) +} + +// ToJSON returns a JSON string representation of the resource data. +func (c *ClusterResourceData) ToJSON() string { + return pretty.ToJSON(c.Resource) +} + +// Raw returns the underlying raw protobuf form of the cluster resource. +func (c *ClusterResourceData) Raw() *anypb.Any { + return c.Resource.Raw +} + +// ClusterWatcher wraps the callbacks to be invoked for different events +// corresponding to the cluster resource being watched. +type ClusterWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*ClusterResourceData) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist() +} + +type delegatingClusterWatcher struct { + watcher ClusterWatcher +} + +func (d *delegatingClusterWatcher) OnUpdate(data ResourceData) { + c := data.(*ClusterResourceData) + d.watcher.OnUpdate(c) +} + +func (d *delegatingClusterWatcher) OnError(err error) { + d.watcher.OnError(err) +} + +func (d *delegatingClusterWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() +} + +// WatchCluster uses xDS to discover the configuration associated with the +// provided cluster resource name. 
+func WatchCluster(p Producer, name string, w ClusterWatcher) (cancel func()) { + delegator := &delegatingClusterWatcher{watcher: w} + return p.WatchResource(clusterType, name, delegator) +} diff --git a/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go new file mode 100644 index 000000000000..775a8aa19423 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -0,0 +1,149 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // EndpointsResourceTypeName represents the transport agnostic name for the + // endpoint resource. + EndpointsResourceTypeName = "EndpointsResource" +) + +var ( + // Compile time interface checks. + _ Type = endpointsResourceType{} + + // Singleton instantiation of the resource type implementation. + endpointsType = endpointsResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3EndpointsURL, + typeName: "EndpointsResource", + allResourcesRequiredInSotW: false, + }, + } +) + +// endpointsResourceType provides the resource-type specific functionality for a +// ClusterLoadAssignment (or Endpoints) resource. 
+// +// Implements the Type interface. +type endpointsResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (endpointsResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, rc, err := unmarshalEndpointsResource(resource) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &EndpointsResourceData{Resource: EndpointsUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &EndpointsResourceData{Resource: rc}}, nil + +} + +// EndpointsResourceData wraps the configuration of an Endpoints resource as +// received from the management server. +// +// Implements the ResourceData interface. +type EndpointsResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource EndpointsUpdate +} + +// Equal returns true if other is equal to r. +func (e *EndpointsResourceData) Equal(other ResourceData) bool { + if e == nil && other == nil { + return true + } + if (e == nil) != (other == nil) { + return false + } + return proto.Equal(e.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (e *EndpointsResourceData) ToJSON() string { + return pretty.ToJSON(e.Resource) +} + +// Raw returns the underlying raw protobuf form of the listener resource. +func (e *EndpointsResourceData) Raw() *anypb.Any { + return e.Resource.Raw +} + +// EndpointsWatcher wraps the callbacks to be invoked for different +// events corresponding to the endpoints resource being watched. 
+type EndpointsWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*EndpointsResourceData) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist() +} + +type delegatingEndpointsWatcher struct { + watcher EndpointsWatcher +} + +func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData) { + e := data.(*EndpointsResourceData) + d.watcher.OnUpdate(e) +} + +func (d *delegatingEndpointsWatcher) OnError(err error) { + d.watcher.OnError(err) +} + +func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() +} + +// WatchEndpoints uses xDS to discover the configuration associated with the +// provided endpoints resource name. +func WatchEndpoints(p Producer, name string, w EndpointsWatcher) (cancel func()) { + delegator := &delegatingEndpointsWatcher{watcher: w} + return p.WatchResource(endpointsType, name, delegator) +} diff --git a/xds/internal/client/errors.go b/xds/internal/xdsclient/xdsresource/errors.go similarity index 82% rename from xds/internal/client/errors.go rename to xds/internal/xdsclient/xdsresource/errors.go index 34ae2738db00..00ef9310481a 100644 --- a/xds/internal/client/errors.go +++ b/xds/internal/xdsclient/xdsresource/errors.go @@ -16,7 +16,7 @@ * */ -package client +package xdsresource import "fmt" @@ -34,6 +34,12 @@ const ( // response. It's typically returned if the resource is removed in the xds // server. 
ErrorTypeResourceNotFound + // ErrorTypeResourceTypeUnsupported indicates the receipt of a message from + // the management server with resources of an unsupported resource type. + ErrorTypeResourceTypeUnsupported + // ErrTypeStreamFailedAfterRecv indicates an ADS stream error, after + // successful receipt of at least one message from the server. + ErrTypeStreamFailedAfterRecv ) type xdsClientError struct { diff --git a/xds/internal/xdsclient/xdsresource/filter_chain.go b/xds/internal/xdsclient/xdsresource/filter_chain.go new file mode 100644 index 000000000000..0390412fdc89 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -0,0 +1,869 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "errors" + "fmt" + "net" + + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" +) + +const ( + // Used as the map key for unspecified prefixes. The actual value of this + // key is immaterial. + unspecifiedPrefixMapKey = "unspecified" + + // An unspecified destination or source prefix should be considered a less + // specific match than a wildcard prefix, `0.0.0.0/0` or `::/0`. Also, an + // unspecified prefix should match most v4 and v6 addresses compared to the + // wildcard prefixes which match only a specific network (v4 or v6). + // + // We use these constants when looking up the most specific prefix match. A + // wildcard prefix will match 0 bits, and to make sure that a wildcard + // prefix is considered a more specific match than an unspecified prefix, we + // use a value of -1 for the latter. + noPrefixMatch = -2 + unspecifiedPrefixMatch = -1 +) + +// FilterChain captures information from within a FilterChain message in a +// Listener resource. +type FilterChain struct { + // SecurityCfg contains transport socket security configuration. + SecurityCfg *SecurityConfig + // HTTPFilters represent the HTTP Filters that comprise this FilterChain. + HTTPFilters []HTTPFilter + // RouteConfigName is the route configuration name for this FilterChain. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. 
+ RouteConfigName string + // InlineRouteConfig is the inline route configuration (RDS response) + // returned for this filter chain. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + InlineRouteConfig *RouteConfigUpdate +} + +// VirtualHostWithInterceptors captures information present in a VirtualHost +// update, and also contains routes with instantiated HTTP Filters. +type VirtualHostWithInterceptors struct { + // Domains are the domain names which map to this Virtual Host. On the + // server side, this will be dictated by the :authority header of the + // incoming RPC. + Domains []string + // Routes are the Routes for this Virtual Host. + Routes []RouteWithInterceptors +} + +// RouteWithInterceptors captures information in a Route, and contains +// a usable matcher and also instantiated HTTP Filters. +type RouteWithInterceptors struct { + // M is the matcher used to match to this route. + M *CompositeMatcher + // ActionType is the type of routing action to initiate once matched to. + ActionType RouteActionType + // Interceptors are interceptors instantiated for this route. These will be + // constructed from a combination of the top level configuration and any + // HTTP Filter overrides present in Virtual Host or Route. + Interceptors []resolver.ServerInterceptor +} + +// ConstructUsableRouteConfiguration takes Route Configuration and converts it +// into matchable route configuration, with instantiated HTTP Filters per route. 
+func (f *FilterChain) ConstructUsableRouteConfiguration(config RouteConfigUpdate) ([]VirtualHostWithInterceptors, error) { + vhs := make([]VirtualHostWithInterceptors, len(config.VirtualHosts)) + for _, vh := range config.VirtualHosts { + vhwi, err := f.convertVirtualHost(vh) + if err != nil { + return nil, fmt.Errorf("virtual host construction: %v", err) + } + vhs = append(vhs, vhwi) + } + return vhs, nil +} + +func (f *FilterChain) convertVirtualHost(virtualHost *VirtualHost) (VirtualHostWithInterceptors, error) { + rs := make([]RouteWithInterceptors, len(virtualHost.Routes)) + for i, r := range virtualHost.Routes { + var err error + rs[i].ActionType = r.ActionType + rs[i].M, err = RouteToMatcher(r) + if err != nil { + return VirtualHostWithInterceptors{}, fmt.Errorf("matcher construction: %v", err) + } + for _, filter := range f.HTTPFilters { + // Route is highest priority on server side, as there is no concept + // of an upstream cluster on server side. + override := r.HTTPFilterConfigOverride[filter.Name] + if override == nil { + // Virtual Host is second priority. + override = virtualHost.HTTPFilterConfigOverride[filter.Name] + } + sb, ok := filter.Filter.(httpfilter.ServerInterceptorBuilder) + if !ok { + // Should not happen if it passed xdsClient validation. + return VirtualHostWithInterceptors{}, fmt.Errorf("filter does not support use in server") + } + si, err := sb.BuildServerInterceptor(filter.Config, override) + if err != nil { + return VirtualHostWithInterceptors{}, fmt.Errorf("filter construction: %v", err) + } + if si != nil { + rs[i].Interceptors = append(rs[i].Interceptors, si) + } + } + } + return VirtualHostWithInterceptors{Domains: virtualHost.Domains, Routes: rs}, nil +} + +// SourceType specifies the connection source IP match type. +type SourceType int + +const ( + // SourceTypeAny matches connection attempts from any source. + SourceTypeAny SourceType = iota + // SourceTypeSameOrLoopback matches connection attempts from the same host. 
+ SourceTypeSameOrLoopback + // SourceTypeExternal matches connection attempts from a different host. + SourceTypeExternal +) + +// FilterChainManager contains all the match criteria specified through all +// filter chains in a single Listener resource. It also contains the default +// filter chain specified in the Listener resource. It provides two important +// pieces of functionality: +// 1. Validate the filter chains in an incoming Listener resource to make sure +// that there aren't filter chains which contain the same match criteria. +// 2. As part of performing the above validation, it builds an internal data +// structure which will if used to look up the matching filter chain at +// connection time. +// +// The logic specified in the documentation around the xDS FilterChainMatch +// proto mentions 8 criteria to match on. +// The following order applies: +// +// 1. Destination port. +// 2. Destination IP address. +// 3. Server name (e.g. SNI for TLS protocol), +// 4. Transport protocol. +// 5. Application protocols (e.g. ALPN for TLS protocol). +// 6. Source type (e.g. any, local or external network). +// 7. Source IP address. +// 8. Source port. +type FilterChainManager struct { + // Destination prefix is the first match criteria that we support. + // Therefore, this multi-stage map is indexed on destination prefixes + // specified in the match criteria. + // Unspecified destination prefix matches end up as a wildcard entry here + // with a key of 0.0.0.0/0. + dstPrefixMap map[string]*destPrefixEntry + + // At connection time, we do not have the actual destination prefix to match + // on. We only have the real destination address of the incoming connection. + // This means that we cannot use the above map at connection time. This list + // contains the map entries from the above map that we can use at connection + // time to find matching destination prefixes in O(n) time. + // + // TODO: Implement LC-trie to support logarithmic time lookups. 
If that + // involves too much time/effort, sort this slice based on the netmask size. + dstPrefixes []*destPrefixEntry + + def *FilterChain // Default filter chain, if specified. + + // RouteConfigNames are the route configuration names which need to be + // dynamically queried for RDS Configuration for any FilterChains which + // specify to load RDS Configuration dynamically. + RouteConfigNames map[string]bool +} + +// destPrefixEntry is the value type of the map indexed on destination prefixes. +type destPrefixEntry struct { + // The actual destination prefix. Set to nil for unspecified prefixes. + net *net.IPNet + // We need to keep track of the transport protocols seen as part of the + // config validation (and internal structure building) phase. The only two + // values that we support are empty string and "raw_buffer", with the latter + // taking preference. Once we have seen one filter chain with "raw_buffer", + // we can drop everything other filter chain with an empty transport + // protocol. + rawBufferSeen bool + // For each specified source type in the filter chain match criteria, this + // array points to the set of specified source prefixes. + // Unspecified source type matches end up as a wildcard entry here with an + // index of 0, which actually represents the source type `ANY`. + srcTypeArr sourceTypesArray +} + +// An array for the fixed number of source types that we have. +type sourceTypesArray [3]*sourcePrefixes + +// sourcePrefixes contains source prefix related information specified in the +// match criteria. These are pointed to by the array of source types. +type sourcePrefixes struct { + // These are very similar to the 'dstPrefixMap' and 'dstPrefixes' field of + // FilterChainManager. Go there for more info. + srcPrefixMap map[string]*sourcePrefixEntry + srcPrefixes []*sourcePrefixEntry +} + +// sourcePrefixEntry contains match criteria per source prefix. +type sourcePrefixEntry struct { + // The actual destination prefix. 
Set to nil for unspecified prefixes. + net *net.IPNet + // Mapping from source ports specified in the match criteria to the actual + // filter chain. Unspecified source port matches en up as a wildcard entry + // here with a key of 0. + srcPortMap map[int]*FilterChain +} + +// NewFilterChainManager parses the received Listener resource and builds a +// FilterChainManager. Returns a non-nil error on validation failures. +// +// This function is only exported so that tests outside of this package can +// create a FilterChainManager. +func NewFilterChainManager(lis *v3listenerpb.Listener) (*FilterChainManager, error) { + // Parse all the filter chains and build the internal data structures. + fci := &FilterChainManager{ + dstPrefixMap: make(map[string]*destPrefixEntry), + RouteConfigNames: make(map[string]bool), + } + if err := fci.addFilterChains(lis.GetFilterChains()); err != nil { + return nil, err + } + // Build the source and dest prefix slices used by Lookup(). + fcSeen := false + for _, dstPrefix := range fci.dstPrefixMap { + fci.dstPrefixes = append(fci.dstPrefixes, dstPrefix) + for _, st := range dstPrefix.srcTypeArr { + if st == nil { + continue + } + for _, srcPrefix := range st.srcPrefixMap { + st.srcPrefixes = append(st.srcPrefixes, srcPrefix) + for _, fc := range srcPrefix.srcPortMap { + if fc != nil { + fcSeen = true + } + } + } + } + } + + // Retrieve the default filter chain. The match criteria specified on the + // default filter chain is never used. The default filter chain simply gets + // used when none of the other filter chains match. + var def *FilterChain + if dfc := lis.GetDefaultFilterChain(); dfc != nil { + var err error + if def, err = fci.filterChainFromProto(dfc); err != nil { + return nil, err + } + } + fci.def = def + + // If there are no supported filter chains and no default filter chain, we + // fail here. This will call the Listener resource to be NACK'ed. 
+ if !fcSeen && fci.def == nil { + return nil, fmt.Errorf("no supported filter chains and no default filter chain") + } + return fci, nil +} + +// addFilterChains parses the filter chains in fcs and adds the required +// internal data structures corresponding to the match criteria. +func (fci *FilterChainManager) addFilterChains(fcs []*v3listenerpb.FilterChain) error { + for _, fc := range fcs { + fcm := fc.GetFilterChainMatch() + if fcm.GetDestinationPort().GetValue() != 0 { + // Destination port is the first match criteria and we do not + // support filter chains which contains this match criteria. + logger.Warningf("Dropping filter chain %+v since it contains unsupported destination_port match field", fc) + continue + } + + // Build the internal representation of the filter chain match fields. + if err := fci.addFilterChainsForDestPrefixes(fc); err != nil { + return err + } + } + + return nil +} + +func (fci *FilterChainManager) addFilterChainsForDestPrefixes(fc *v3listenerpb.FilterChain) error { + ranges := fc.GetFilterChainMatch().GetPrefixRanges() + dstPrefixes := make([]*net.IPNet, 0, len(ranges)) + for _, pr := range ranges { + cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue()) + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("failed to parse destination prefix range: %+v", pr) + } + dstPrefixes = append(dstPrefixes, ipnet) + } + + if len(dstPrefixes) == 0 { + // Use the unspecified entry when destination prefix is unspecified, and + // set the `net` field to nil. 
+ if fci.dstPrefixMap[unspecifiedPrefixMapKey] == nil { + fci.dstPrefixMap[unspecifiedPrefixMapKey] = &destPrefixEntry{} + } + return fci.addFilterChainsForServerNames(fci.dstPrefixMap[unspecifiedPrefixMapKey], fc) + } + for _, prefix := range dstPrefixes { + p := prefix.String() + if fci.dstPrefixMap[p] == nil { + fci.dstPrefixMap[p] = &destPrefixEntry{net: prefix} + } + if err := fci.addFilterChainsForServerNames(fci.dstPrefixMap[p], fc); err != nil { + return err + } + } + return nil +} + +func (fci *FilterChainManager) addFilterChainsForServerNames(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + // Filter chains specifying server names in their match criteria always fail + // a match at connection time. So, these filter chains can be dropped now. + if len(fc.GetFilterChainMatch().GetServerNames()) != 0 { + logger.Warningf("Dropping filter chain %+v since it contains unsupported server_names match field", fc) + return nil + } + + return fci.addFilterChainsForTransportProtocols(dstEntry, fc) +} + +func (fci *FilterChainManager) addFilterChainsForTransportProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + tp := fc.GetFilterChainMatch().GetTransportProtocol() + switch { + case tp != "" && tp != "raw_buffer": + // Only allow filter chains with transport protocol set to empty string + // or "raw_buffer". + logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + return nil + case tp == "" && dstEntry.rawBufferSeen: + // If we have already seen filter chains with transport protocol set to + // "raw_buffer", we can drop filter chains with transport protocol set + // to empty string, since the former takes precedence. + logger.Warningf("Dropping filter chain %+v since it contains unsupported value for transport_protocols match field", fc) + return nil + case tp != "" && !dstEntry.rawBufferSeen: + // This is the first "raw_buffer" that we are seeing. 
Set the bit and + // reset the source types array which might contain entries for filter + // chains with transport protocol set to empty string. + dstEntry.rawBufferSeen = true + dstEntry.srcTypeArr = sourceTypesArray{} + } + return fci.addFilterChainsForApplicationProtocols(dstEntry, fc) +} + +func (fci *FilterChainManager) addFilterChainsForApplicationProtocols(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + if len(fc.GetFilterChainMatch().GetApplicationProtocols()) != 0 { + logger.Warningf("Dropping filter chain %+v since it contains unsupported application_protocols match field", fc) + return nil + } + return fci.addFilterChainsForSourceType(dstEntry, fc) +} + +// addFilterChainsForSourceType adds source types to the internal data +// structures and delegates control to addFilterChainsForSourcePrefixes to +// continue building the internal data structure. +func (fci *FilterChainManager) addFilterChainsForSourceType(dstEntry *destPrefixEntry, fc *v3listenerpb.FilterChain) error { + var srcType SourceType + switch st := fc.GetFilterChainMatch().GetSourceType(); st { + case v3listenerpb.FilterChainMatch_ANY: + srcType = SourceTypeAny + case v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK: + srcType = SourceTypeSameOrLoopback + case v3listenerpb.FilterChainMatch_EXTERNAL: + srcType = SourceTypeExternal + default: + return fmt.Errorf("unsupported source type: %v", st) + } + + st := int(srcType) + if dstEntry.srcTypeArr[st] == nil { + dstEntry.srcTypeArr[st] = &sourcePrefixes{srcPrefixMap: make(map[string]*sourcePrefixEntry)} + } + return fci.addFilterChainsForSourcePrefixes(dstEntry.srcTypeArr[st].srcPrefixMap, fc) +} + +// addFilterChainsForSourcePrefixes adds source prefixes to the internal data +// structures and delegates control to addFilterChainsForSourcePorts to continue +// building the internal data structure. 
+func (fci *FilterChainManager) addFilterChainsForSourcePrefixes(srcPrefixMap map[string]*sourcePrefixEntry, fc *v3listenerpb.FilterChain) error {
+	ranges := fc.GetFilterChainMatch().GetSourcePrefixRanges()
+	srcPrefixes := make([]*net.IPNet, 0, len(ranges))
+	// Iterate over the already-fetched `ranges` rather than re-evaluating the
+	// getter chain a second time.
+	for _, pr := range ranges {
+		cidr := fmt.Sprintf("%s/%d", pr.GetAddressPrefix(), pr.GetPrefixLen().GetValue())
+		_, ipnet, err := net.ParseCIDR(cidr)
+		if err != nil {
+			return fmt.Errorf("failed to parse source prefix range: %+v", pr)
+		}
+		srcPrefixes = append(srcPrefixes, ipnet)
+	}
+
+	if len(srcPrefixes) == 0 {
+		// Use the unspecified entry when the source prefix is unspecified, and
+		// set the `net` field to nil.
+		if srcPrefixMap[unspecifiedPrefixMapKey] == nil {
+			srcPrefixMap[unspecifiedPrefixMapKey] = &sourcePrefixEntry{
+				srcPortMap: make(map[int]*FilterChain),
+			}
+		}
+		return fci.addFilterChainsForSourcePorts(srcPrefixMap[unspecifiedPrefixMapKey], fc)
+	}
+	for _, prefix := range srcPrefixes {
+		p := prefix.String()
+		if srcPrefixMap[p] == nil {
+			srcPrefixMap[p] = &sourcePrefixEntry{
+				net:        prefix,
+				srcPortMap: make(map[int]*FilterChain),
+			}
+		}
+		if err := fci.addFilterChainsForSourcePorts(srcPrefixMap[p], fc); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// addFilterChainsForSourcePorts adds source ports to the internal data
+// structures and completes the process of building the internal data structure.
+// It is here that we determine if there are multiple filter chains with
+// overlapping matching rules.
+func (fci *FilterChainManager) addFilterChainsForSourcePorts(srcEntry *sourcePrefixEntry, fcProto *v3listenerpb.FilterChain) error {
+	ports := fcProto.GetFilterChainMatch().GetSourcePorts()
+	srcPorts := make([]int, 0, len(ports))
+	for _, port := range ports {
+		srcPorts = append(srcPorts, int(port))
+	}
+
+	fc, err := fci.filterChainFromProto(fcProto)
+	if err != nil {
+		return err
+	}
+
+	if len(srcPorts) == 0 {
+		// Use the wildcard port '0', when source ports are unspecified.
+		if curFC := srcEntry.srcPortMap[0]; curFC != nil {
+			return errors.New("multiple filter chains with overlapping matching rules are defined")
+		}
+		srcEntry.srcPortMap[0] = fc
+		return nil
+	}
+	for _, port := range srcPorts {
+		if curFC := srcEntry.srcPortMap[port]; curFC != nil {
+			return errors.New("multiple filter chains with overlapping matching rules are defined")
+		}
+		srcEntry.srcPortMap[port] = fc
+	}
+	return nil
+}
+
+// filterChainFromProto extracts the relevant information from the FilterChain
+// proto and stores it in our internal representation. It also persists any
+// RouteNames which need to be queried dynamically via RDS.
+func (fci *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain) (*FilterChain, error) {
+	filterChain, err := processNetworkFilters(fc.GetFilters())
+	if err != nil {
+		return nil, err
+	}
+	// These route names will be dynamically queried via RDS in the wrapped
+	// listener, which receives the LDS response, if specified for the filter
+	// chain.
+	if filterChain.RouteConfigName != "" {
+		fci.RouteConfigNames[filterChain.RouteConfigName] = true
+	}
+	// If the transport_socket field is not specified, it means that the control
+	// plane has not sent us any security config. This is fine and the server
+	// will use the fallback credentials configured as part of the
+	// xdsCredentials.
+	ts := fc.GetTransportSocket()
+	if ts == nil {
+		return filterChain, nil
+	}
+	if name := ts.GetName(); name != transportSocketName {
+		return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name)
+	}
+	// Use the nil-safe generated getter here: directly accessing the TypeUrl
+	// field on a nil *anypb.Any (unset typed_config) would panic while
+	// formatting the error message. GetTypeUrl() returns "" for nil, which
+	// still fails the comparison below and produces a proper error.
+	tsCfg := ts.GetTypedConfig()
+	if tsCfg.GetTypeUrl() != version.V3DownstreamTLSContextURL {
+		return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tsCfg.GetTypeUrl())
+	}
+	downstreamCtx := &v3tlspb.DownstreamTlsContext{}
+	if err := proto.Unmarshal(tsCfg.GetValue(), downstreamCtx); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err)
+	}
+	if downstreamCtx.GetRequireSni().GetValue() {
+		return nil, fmt.Errorf("require_sni field set to true in DownstreamTlsContext message: %v", downstreamCtx)
+	}
+	if downstreamCtx.GetOcspStaplePolicy() != v3tlspb.DownstreamTlsContext_LENIENT_STAPLING {
+		return nil, fmt.Errorf("ocsp_staple_policy field set to unsupported value in DownstreamTlsContext message: %v", downstreamCtx)
+	}
+	// The following fields from `DownstreamTlsContext` are ignored:
+	// - disable_stateless_session_resumption
+	// - session_ticket_keys
+	// - session_ticket_keys_sds_secret_config
+	// - session_timeout
+	if downstreamCtx.GetCommonTlsContext() == nil {
+		return nil, errors.New("DownstreamTlsContext in LDS response does not contain a CommonTlsContext")
+	}
+	sc, err := securityConfigFromCommonTLSContext(downstreamCtx.GetCommonTlsContext(), true)
+	if err != nil {
+		return nil, err
+	}
+	if sc == nil {
+		// sc == nil is a valid case where the control plane has not sent us any
+		// security configuration. xDS creds will use fallback creds.
+		return filterChain, nil
+	}
+	sc.RequireClientCert = downstreamCtx.GetRequireClientCertificate().GetValue()
+	if sc.RequireClientCert && sc.RootInstanceName == "" {
+		return nil, errors.New("security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set")
+	}
+	filterChain.SecurityCfg = sc
+	return filterChain, nil
+}
+
+// Validate takes a function to validate the FilterChains in this manager.
+func (fci *FilterChainManager) Validate(f func(fc *FilterChain) error) error {
+	for _, dst := range fci.dstPrefixMap {
+		for _, srcType := range dst.srcTypeArr {
+			if srcType == nil {
+				continue
+			}
+			for _, src := range srcType.srcPrefixMap {
+				for _, fc := range src.srcPortMap {
+					if err := f(fc); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	// The default filter chain (possibly nil) is validated as well; f must
+	// handle a nil FilterChain.
+	return f(fci.def)
+}
+
+// processNetworkFilters validates the network filters of a filter chain and
+// builds the internal FilterChain representation from the single supported
+// HttpConnectionManager network filter.
+func processNetworkFilters(filters []*v3listenerpb.Filter) (*FilterChain, error) {
+	filterChain := &FilterChain{}
+	seenNames := make(map[string]bool, len(filters))
+	seenHCM := false
+	for _, filter := range filters {
+		name := filter.GetName()
+		if name == "" {
+			return nil, fmt.Errorf("network filters {%+v} is missing name field in filter: {%+v}", filters, filter)
+		}
+		if seenNames[name] {
+			return nil, fmt.Errorf("network filters {%+v} has duplicate filter name %q", filters, name)
+		}
+		seenNames[name] = true
+
+		// Network filters have a oneof field named `config_type` where we
+		// only support `TypedConfig` variant.
+		switch typ := filter.GetConfigType().(type) {
+		case *v3listenerpb.Filter_TypedConfig:
+			// The typed_config field has an `anypb.Any` proto which could
+			// directly contain the serialized bytes of the actual filter
+			// configuration, or it could be encoded as a `TypedStruct`.
+			// TODO: Add support for `TypedStruct`.
+			tc := filter.GetTypedConfig()
+
+			// The only network filter that we currently support is the v3
+			// HttpConnectionManager. So, we can directly check the type_url
+			// and unmarshal the config.
+			// TODO: Implement a registry of supported network filters (like
+			// we have for HTTP filters), when we have to support network
+			// filters other than HttpConnectionManager.
+			if tc.GetTypeUrl() != version.V3HTTPConnManagerURL {
+				return nil, fmt.Errorf("network filters {%+v} has unsupported network filter %q in filter {%+v}", filters, tc.GetTypeUrl(), filter)
+			}
+			hcm := &v3httppb.HttpConnectionManager{}
+			if err := ptypes.UnmarshalAny(tc, hcm); err != nil {
+				return nil, fmt.Errorf("network filters {%+v} failed unmarshaling of network filter {%+v}: %v", filters, filter, err)
+			}
+			// "Any filters after HttpConnectionManager should be ignored during
+			// connection processing but still be considered for validity.
+			// HTTPConnectionManager must have valid http_filters." - A36
+			//
+			// Use a distinct name for the HTTP filter list so that the
+			// `filters` parameter (the network filters) is not shadowed: the
+			// error message below is meant to print the network filters, not
+			// the (nil-on-error) HTTP filter list.
+			hfs, err := processHTTPFilters(hcm.GetHttpFilters(), true)
+			if err != nil {
+				return nil, fmt.Errorf("network filters {%+v} had invalid server side HTTP Filters {%+v}: %v", filters, hcm.GetHttpFilters(), err)
+			}
+			if !seenHCM {
+				// Validate for RBAC in only the HCM that will be used, since this isn't a logical validation failure,
+				// it's simply a validation to support RBAC HTTP Filter.
+				// "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and
+				// HttpConnectionManager.original_ip_detection_extensions must be empty. If
+				// either field has an incorrect value, the Listener must be NACKed." - A41
+				if hcm.XffNumTrustedHops != 0 {
+					return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", hcm)
+				}
+				if len(hcm.OriginalIpDetectionExtensions) != 0 {
+					return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", hcm)
+				}
+
+				// TODO: Implement terminal filter logic, as per A36.
+				filterChain.HTTPFilters = hfs
+				seenHCM = true
+				if !envconfig.XDSRBAC {
+					continue
+				}
+				switch hcm.RouteSpecifier.(type) {
+				case *v3httppb.HttpConnectionManager_Rds:
+					if hcm.GetRds().GetConfigSource().GetAds() == nil {
+						return nil, fmt.Errorf("ConfigSource is not ADS: %+v", hcm)
+					}
+					name := hcm.GetRds().GetRouteConfigName()
+					if name == "" {
+						return nil, fmt.Errorf("empty route_config_name: %+v", hcm)
+					}
+					filterChain.RouteConfigName = name
+				case *v3httppb.HttpConnectionManager_RouteConfig:
+					// "RouteConfiguration validation logic inherits all
+					// previous validations made for client-side usage as RDS
+					// does not distinguish between client-side and
+					// server-side." - A36
+					// Can specify v3 here, as will never get to this function
+					// if v2.
+					routeU, err := generateRDSUpdateFromRouteConfiguration(hcm.GetRouteConfig())
+					if err != nil {
+						return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err)
+					}
+					filterChain.InlineRouteConfig = &routeU
+				case nil:
+					return nil, fmt.Errorf("no RouteSpecifier: %+v", hcm)
+				default:
+					return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", hcm.RouteSpecifier)
+				}
+			}
+		default:
+			return nil, fmt.Errorf("network filters {%+v} has unsupported config_type %T in filter %s", filters, typ, filter.GetName())
+		}
+	}
+	if !seenHCM {
+		return nil, fmt.Errorf("network filters {%+v} missing HttpConnectionManager filter", filters)
+	}
+	return filterChain, nil
+}
+
+// FilterChainLookupParams wraps parameters to be passed to Lookup.
+type FilterChainLookupParams struct {
+	// IsUnspecified indicates whether the server is listening on a wildcard
+	// address, "0.0.0.0" for IPv4 and "::" for IPv6. Only when this is set to
+	// true, do we consider the destination prefixes specified in the filter
+	// chain match criteria.
+	IsUnspecifiedListener bool
+	// DestAddr is the local address of an incoming connection.
+	DestAddr net.IP
+	// SourceAddr is the remote address of an incoming connection.
+	SourceAddr net.IP
+	// SourcePort is the remote port of an incoming connection.
+	SourcePort int
+}
+
+// Lookup returns the most specific matching filter chain to be used for an
+// incoming connection on the server side.
+//
+// Returns a non-nil error if no matching filter chain could be found or
+// multiple matching filter chains were found, and in both cases, the incoming
+// connection must be dropped.
+func (fci *FilterChainManager) Lookup(params FilterChainLookupParams) (*FilterChain, error) {
+	dstPrefixes := filterByDestinationPrefixes(fci.dstPrefixes, params.IsUnspecifiedListener, params.DestAddr)
+	if len(dstPrefixes) == 0 {
+		if fci.def != nil {
+			return fci.def, nil
+		}
+		return nil, fmt.Errorf("no matching filter chain based on destination prefix match for %+v", params)
+	}
+
+	srcType := SourceTypeExternal
+	if params.SourceAddr.Equal(params.DestAddr) || params.SourceAddr.IsLoopback() {
+		srcType = SourceTypeSameOrLoopback
+	}
+	srcPrefixes := filterBySourceType(dstPrefixes, srcType)
+	if len(srcPrefixes) == 0 {
+		if fci.def != nil {
+			return fci.def, nil
+		}
+		return nil, fmt.Errorf("no matching filter chain based on source type match for %+v", params)
+	}
+	srcPrefixEntry, err := filterBySourcePrefixes(srcPrefixes, params.SourceAddr)
+	if err != nil {
+		return nil, err
+	}
+	if fc := filterBySourcePorts(srcPrefixEntry, params.SourcePort); fc != nil {
+		return fc, nil
+	}
+	if fci.def != nil {
+		return fci.def, nil
+	}
+	return nil, fmt.Errorf("no matching filter chain after all match criteria for %+v", params)
+}
+
+// filterByDestinationPrefixes is the first stage of the filter chain
+// matching algorithm. It takes the complete set of configured filter chain
+// matchers and returns the most specific matchers based on the destination
+// prefix match criteria (the prefixes which match the most number of bits).
+func filterByDestinationPrefixes(dstPrefixes []*destPrefixEntry, isUnspecified bool, dstAddr net.IP) []*destPrefixEntry { + if !isUnspecified { + // Destination prefix matchers are considered only when the listener is + // bound to the wildcard address. + return dstPrefixes + } + + var matchingDstPrefixes []*destPrefixEntry + maxSubnetMatch := noPrefixMatch + for _, prefix := range dstPrefixes { + if prefix.net != nil && !prefix.net.Contains(dstAddr) { + // Skip prefixes which don't match. + continue + } + // For unspecified prefixes, since we do not store a real net.IPNet + // inside prefix, we do not perform a match. Instead we simply set + // the matchSize to -1, which is less than the matchSize (0) for a + // wildcard prefix. + matchSize := unspecifiedPrefixMatch + if prefix.net != nil { + matchSize, _ = prefix.net.Mask.Size() + } + if matchSize < maxSubnetMatch { + continue + } + if matchSize > maxSubnetMatch { + maxSubnetMatch = matchSize + matchingDstPrefixes = make([]*destPrefixEntry, 0, 1) + } + matchingDstPrefixes = append(matchingDstPrefixes, prefix) + } + return matchingDstPrefixes +} + +// filterBySourceType is the second stage of the matching algorithm. It +// trims the filter chains based on the most specific source type match. 
+func filterBySourceType(dstPrefixes []*destPrefixEntry, srcType SourceType) []*sourcePrefixes {
+	var (
+		srcPrefixes []*sourcePrefixes
+		// bestSrcTypeMatch starts at 0, which is int(SourceTypeAny): a
+		// wildcard match never beats a specific (loopback/external) match.
+		bestSrcTypeMatch int
+	)
+	for _, prefix := range dstPrefixes {
+		var (
+			srcPrefix *sourcePrefixes
+			match     int
+		)
+		// Try the specific source type first; SourceTypeAny is handled by the
+		// fallback below.
+		switch srcType {
+		case SourceTypeExternal:
+			match = int(SourceTypeExternal)
+			srcPrefix = prefix.srcTypeArr[match]
+		case SourceTypeSameOrLoopback:
+			match = int(SourceTypeSameOrLoopback)
+			srcPrefix = prefix.srcTypeArr[match]
+		}
+		if srcPrefix == nil {
+			// Fall back to the wildcard (ANY) entry when no filter chain
+			// specified the more specific source type.
+			match = int(SourceTypeAny)
+			srcPrefix = prefix.srcTypeArr[match]
+		}
+		if match < bestSrcTypeMatch {
+			continue
+		}
+		if match > bestSrcTypeMatch {
+			// A more specific source type match resets the result set.
+			bestSrcTypeMatch = match
+			srcPrefixes = make([]*sourcePrefixes, 0)
+		}
+		if srcPrefix != nil {
+			// The source type array always has 3 entries, but these could be
+			// nil if the appropriate source type match was not specified.
+			srcPrefixes = append(srcPrefixes, srcPrefix)
+		}
+	}
+	return srcPrefixes
+}
+
+// filterBySourcePrefixes is the third stage of the filter chain matching
+// algorithm. It trims the filter chains based on the source prefix. At most one
+// filter chain with the most specific match progress to the next stage.
+func filterBySourcePrefixes(srcPrefixes []*sourcePrefixes, srcAddr net.IP) (*sourcePrefixEntry, error) {
+	var matchingSrcPrefixes []*sourcePrefixEntry
+	maxSubnetMatch := noPrefixMatch
+	for _, sp := range srcPrefixes {
+		for _, prefix := range sp.srcPrefixes {
+			if prefix.net != nil && !prefix.net.Contains(srcAddr) {
+				// Skip prefixes which don't match.
+				continue
+			}
+			// For unspecified prefixes, since we do not store a real net.IPNet
+			// inside prefix, we do not perform a match. Instead we simply set
+			// the matchSize to -1, which is less than the matchSize (0) for a
+			// wildcard prefix.
+			matchSize := unspecifiedPrefixMatch
+			if prefix.net != nil {
+				matchSize, _ = prefix.net.Mask.Size()
+			}
+			if matchSize < maxSubnetMatch {
+				continue
+			}
+			if matchSize > maxSubnetMatch {
+				// A longer prefix match resets the result set.
+				maxSubnetMatch = matchSize
+				matchingSrcPrefixes = make([]*sourcePrefixEntry, 0, 1)
+			}
+			matchingSrcPrefixes = append(matchingSrcPrefixes, prefix)
+		}
+	}
+	if len(matchingSrcPrefixes) == 0 {
+		// Finding no match is not an error condition. The caller will end up
+		// using the default filter chain if one was configured.
+		return nil, nil
+	}
+	// We expect at most a single matching source prefix entry at this point. If
+	// we have multiple entries here, and some of their source port matchers had
+	// wildcard entries, we could be left with more than one matching filter
+	// chain and hence would have been flagged as an invalid configuration at
+	// config validation time.
+	if len(matchingSrcPrefixes) != 1 {
+		return nil, errors.New("multiple matching filter chains")
+	}
+	return matchingSrcPrefixes[0], nil
+}
+
+// filterBySourcePorts is the last stage of the filter chain matching
+// algorithm. It trims the filter chains based on the source ports.
+func filterBySourcePorts(spe *sourcePrefixEntry, srcPort int) *FilterChain {
+	if spe == nil {
+		return nil
+	}
+	// A match could be a wildcard match (this happens when the match
+	// criteria does not specify source ports) or a specific port match (this
+	// happens when the match criteria specifies a set of ports and the source
+	// port of the incoming connection matches one of the specified ports). The
+	// latter is considered to be a more specific match.
+	if fc := spe.srcPortMap[srcPort]; fc != nil {
+		return fc
+	}
+	if fc := spe.srcPortMap[0]; fc != nil {
+		return fc
+	}
+	return nil
+}
diff --git a/xds/internal/xdsclient/xdsresource/filter_chain_test.go b/xds/internal/xdsclient/xdsresource/filter_chain_test.go
new file mode 100644
index 000000000000..4edf9ce006f1
--- /dev/null
+++ b/xds/internal/xdsclient/xdsresource/filter_chain_test.go
@@ -0,0 +1,2972 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package xdsresource
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+	"testing"
+
+	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+	v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
+	v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
+	v3routerpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
+	v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
+	v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"google.golang.org/protobuf/testing/protocmp"
+	"google.golang.org/protobuf/types/known/anypb"
+	"google.golang.org/protobuf/types/known/wrapperspb"
+
+	"google.golang.org/grpc/internal/envconfig"
+	iresolver "google.golang.org/grpc/internal/resolver"
+	"google.golang.org/grpc/internal/testutils"
+	"google.golang.org/grpc/internal/testutils/xds/e2e"
+	"google.golang.org/grpc/xds/internal/httpfilter"
+	"google.golang.org/grpc/xds/internal/httpfilter/router"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version"
+)
+
+const (
+	topLevel = "top level"
+	vhLevel  = "virtual host level"
+	rLevel   = "route level"
+)
+
+var (
+	// routeConfig is a minimal route configuration with a single virtual host
+	// and a non-forwarding route, used to build valid network filters in tests.
+	routeConfig = &v3routepb.RouteConfiguration{
+		Name: "routeName",
+		VirtualHosts: []*v3routepb.VirtualHost{{
+			Domains: []string{"lds.target.good:3333"},
+			Routes: []*v3routepb.Route{{
+				Match: &v3routepb.RouteMatch{
+					PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"},
+				},
+				Action: &v3routepb.Route_NonForwardingAction{},
+			}}}}}
+	// inlineRouteConfig is the internal representation expected after parsing
+	// routeConfig inline.
+	inlineRouteConfig = &RouteConfigUpdate{
+		VirtualHosts: []*VirtualHost{{
+			Domains: []string{"lds.target.good:3333"},
+			Routes:  []*Route{{Prefix: newStringP("/"), ActionType: RouteActionNonForwardingAction}},
+		}}}
+	// emptyValidNetworkFilters is a minimal, valid network filter list: a
+	// single HttpConnectionManager with an inline route config and the router
+	// HTTP filter.
+	emptyValidNetworkFilters = []*v3listenerpb.Filter{
+		{
+			Name: "filter-1",
+			ConfigType: &v3listenerpb.Filter_TypedConfig{
+				TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{
+					RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{
+						RouteConfig: routeConfig,
+					},
+					HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter},
+				}),
+			},
+		},
+	}
+	validServerSideHTTPFilter1 = &v3httppb.HttpFilter{
+		Name:       "serverOnlyCustomFilter",
+		ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig},
+	}
+	validServerSideHTTPFilter2 = &v3httppb.HttpFilter{
+		Name:       "serverOnlyCustomFilter2",
+		ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig},
+	}
+	emptyRouterFilter = e2e.RouterHTTPFilter
+	routerBuilder     = httpfilter.Get(router.TypeURL)
+	routerConfig, _   = routerBuilder.ParseFilterConfig(testutils.MarshalAny(&v3routerpb.Router{}))
+	routerFilter      = HTTPFilter{Name: "router", Filter: routerBuilder, Config: routerConfig}
+	routerFilterList  = []HTTPFilter{routerFilter}
+)
+
+// TestNewFilterChainImpl_Failure_BadMatchFields verifies cases where we have a
+// single filter chain with match criteria that contains unsupported fields.
+func (s) TestNewFilterChainImpl_Failure_BadMatchFields(t *testing.T) { + tests := []struct { + desc string + lis *v3listenerpb.Listener + }{ + { + desc: "unsupported destination port field", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{DestinationPort: &wrapperspb.UInt32Value{Value: 666}}, + }, + }, + }, + }, + { + desc: "unsupported server names field", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ServerNames: []string{"example-server"}}, + }, + }, + }, + }, + { + desc: "unsupported transport protocol ", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{TransportProtocol: "tls"}, + }, + }, + }, + }, + { + desc: "unsupported application protocol field", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ApplicationProtocols: []string{"h2"}}, + }, + }, + }, + }, + { + desc: "bad dest address prefix", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{{AddressPrefix: "a.b.c.d"}}}, + }, + }, + }, + }, + { + desc: "bad dest prefix length", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.1.1.0", 50)}}, + }, + }, + }, + }, + { + desc: "bad source address prefix", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePrefixRanges: []*v3corepb.CidrRange{{AddressPrefix: "a.b.c.d"}}}, + }, + }, + }, + }, + { + desc: "bad source prefix length", + lis: &v3listenerpb.Listener{ + FilterChains: 
[]*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.1.1.0", 50)}}, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + if fci, err := NewFilterChainManager(test.lis); err == nil { + t.Fatalf("NewFilterChainManager() returned %v when expected to fail", fci) + } + }) + } +} + +// TestNewFilterChainImpl_Failure_OverlappingMatchingRules verifies cases where +// there are multiple filter chains and they have overlapping match rules. +func (s) TestNewFilterChainImpl_Failure_OverlappingMatchingRules(t *testing.T) { + tests := []struct { + desc string + lis *v3listenerpb.Listener + }{ + { + desc: "matching destination prefixes with no other matchers", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16), cidrRangeFromAddressAndPrefixLen("10.0.0.0", 0)}, + }, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.2.2", 16)}, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + }, + { + desc: "matching source type", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_ANY}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_EXTERNAL}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: 
v3listenerpb.FilterChainMatch_EXTERNAL}, + Filters: emptyValidNetworkFilters, + }, + }, + }, + }, + { + desc: "matching source prefixes", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16), cidrRangeFromAddressAndPrefixLen("10.0.0.0", 0)}, + }, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.2.2", 16)}, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + }, + { + desc: "matching source ports", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3, 4, 5}}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{5, 6, 7}}, + Filters: emptyValidNetworkFilters, + }, + }, + }, + }, + } + + const wantErr = "multiple filter chains with overlapping matching rules are defined" + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + if _, err := NewFilterChainManager(test.lis); err == nil || !strings.Contains(err.Error(), wantErr) { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, wantErr) + } + }) + } +} + +// TestNewFilterChainImpl_Failure_BadSecurityConfig verifies cases where the +// security configuration in the filter chain is invalid. 
+func (s) TestNewFilterChainImpl_Failure_BadSecurityConfig(t *testing.T) { + tests := []struct { + desc string + lis *v3listenerpb.Listener + wantErr string + }{ + { + desc: "no filter chains", + lis: &v3listenerpb.Listener{}, + wantErr: "no supported filter chains and no default filter chain", + }, + { + desc: "unexpected transport socket name", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{Name: "unsupported-transport-socket-name"}, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "transport_socket field has unexpected name", + }, + { + desc: "unexpected transport socket URL", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{}), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "transport_socket field has unexpected typeURL", + }, + { + desc: "badly marshaled transport socket", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: &anypb.Any{ + TypeUrl: version.V3DownstreamTLSContextURL, + Value: []byte{1, 2, 3, 4}, + }, + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "failed to unmarshal DownstreamTlsContext in LDS response", + }, + { + desc: "missing CommonTlsContext", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{}), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + 
wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", + }, + { + desc: "require_sni-set-to-true-in-downstreamTlsContext", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireSni: &wrapperspb.BoolValue{Value: true}, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "require_sni field set to true in DownstreamTlsContext message", + }, + { + desc: "unsupported-ocsp_staple_policy-in-downstreamTlsContext", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + OcspStaplePolicy: v3tlspb.DownstreamTlsContext_STRICT_STAPLING, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "ocsp_staple_policy field set to unsupported value in DownstreamTlsContext message", + }, + { + desc: "unsupported validation context in transport socket", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ + ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ + Name: "foo-sds-secret", + }, + }, + }, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "validation context contains unexpected type", + }, + { + desc: "unsupported match_subject_alt_names field 
in transport socket", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ + ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ + Name: "foo-sds-secret", + }, + }, + }, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "validation context contains unexpected type", + }, + { + desc: "no root certificate provider with require_client_cert", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", + }, + { + desc: "no identity certificate provider", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{}, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + 
}, + wantErr: "security configuration on the server-side does not contain identity certificate provider instance name", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + _, err := NewFilterChainManager(test.lis) + if err == nil || !strings.Contains(err.Error(), test.wantErr) { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) + } + }) + } +} + +// TestNewFilterChainImpl_Success_RouteUpdate tests the construction of the +// filter chain with valid HTTP Filters present. +func (s) TestNewFilterChainImpl_Success_RouteUpdate(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + tests := []struct { + name string + lis *v3listenerpb.Listener + wantFC *FilterChainManager + }{ + { + name: "rds", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-1", + }, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-1", + }, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + 
}), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + RouteConfigName: "route-1", + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + RouteConfigName: "route-1", + HTTPFilters: routerFilterList, + }, + RouteConfigNames: map[string]bool{"route-1": true}, + }, + }, + { + name: "inline route config", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + // two rds tests whether the Filter Chain Manager successfully 
persists + // the two RDS names that need to be dynamically queried. + { + name: "two rds", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-1", + }, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: "route-2", + }, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + RouteConfigName: "route-1", + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + RouteConfigName: "route-2", + HTTPFilters: routerFilterList, + }, + RouteConfigNames: map[string]bool{ + "route-1": true, + "route-2": true, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + gotFC, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() 
returned err: %v, wantErr: nil", err) + } + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{}), cmpOpts) { + t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) + } + }) + } +} + +// TestNewFilterChainImpl_Failure_BadRouteUpdate verifies cases where the Route +// Update in the filter chain are invalid. +func (s) TestNewFilterChainImpl_Failure_BadRouteUpdate(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + tests := []struct { + name string + lis *v3listenerpb.Listener + wantErr string + }{ + { + name: "missing-route-specifier", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + wantErr: "no RouteSpecifier", + }, + { + name: "not-ads", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + RouteConfigName: "route-1", + }, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + 
Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + RouteConfigName: "route-1", + }, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + wantErr: "ConfigSource is not ADS", + }, + { + name: "unsupported-route-specifier", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + wantErr: "unsupported type", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, err := NewFilterChainManager(test.lis) + if err == nil || !strings.Contains(err.Error(), test.wantErr) { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) + } + }) + } +} + +// TestNewFilterChainImpl_Failure_BadHTTPFilters verifies cases where the HTTP +// Filters in the filter chain are invalid. 
+func (s) TestNewFilterChainImpl_Failure_BadHTTPFilters(t *testing.T) { + tests := []struct { + name string + lis *v3listenerpb.Listener + wantErr string + }{ + { + name: "client side HTTP filter", + lis: &v3listenerpb.Listener{ + Name: "grpc/server?xds.resource.listening_address=0.0.0.0:9999", + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + { + Name: "clientOnlyCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: clientOnlyCustomFilterConfig}, + }, + }, + }), + }, + }, + }, + }, + }, + }, + wantErr: "invalid server side HTTP Filters", + }, + { + name: "one valid then one invalid HTTP filter", + lis: &v3listenerpb.Listener{ + Name: "grpc/server?xds.resource.listening_address=0.0.0.0:9999", + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + { + Name: "clientOnlyCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: clientOnlyCustomFilterConfig}, + }, + }, + }), + }, + }, + }, + }, + }, + }, + wantErr: "invalid server side HTTP Filters", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, err := NewFilterChainManager(test.lis) + if err == nil || !strings.Contains(err.Error(), test.wantErr) { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: %s", err, test.wantErr) + } + }) + } +} + +// TestNewFilterChainImpl_Success_HTTPFilters tests the construction of the +// filter chain with valid HTTP Filters present. 
+func (s) TestNewFilterChainImpl_Success_HTTPFilters(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + tests := []struct { + name string + lis *v3listenerpb.Listener + wantFC *FilterChainManager + }{ + { + name: "singular valid http filter", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + emptyRouterFilter, + }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + emptyRouterFilter, + }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: {HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + routerFilter, + }, + InlineRouteConfig: inlineRouteConfig, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + routerFilter, + }, + 
InlineRouteConfig: inlineRouteConfig, + }, + }, + }, + { + name: "two valid http filters", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + validServerSideHTTPFilter2, + emptyRouterFilter, + }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + validServerSideHTTPFilter2, + emptyRouterFilter, + }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: {HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + { + Name: "serverOnlyCustomFilter2", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + routerFilter, + }, + InlineRouteConfig: inlineRouteConfig, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + { + Name: "serverOnlyCustomFilter2", + Filter: 
serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + routerFilter, + }, + InlineRouteConfig: inlineRouteConfig, + }, + }, + }, + // In the case of two HTTP Connection Manager's being present, the + // second HTTP Connection Manager should be validated, but ignored. + { + name: "two hcms", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + validServerSideHTTPFilter2, + emptyRouterFilter, + }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + { + Name: "hcm2", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + emptyRouterFilter, + }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: []*v3listenerpb.Filter{ + { + Name: "hcm", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + validServerSideHTTPFilter2, + emptyRouterFilter, + }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + { + Name: "hcm2", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + HttpFilters: []*v3httppb.HttpFilter{ + validServerSideHTTPFilter1, + emptyRouterFilter, + }, + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + 
}, + }, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: {HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + { + Name: "serverOnlyCustomFilter2", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + routerFilter, + }, + InlineRouteConfig: inlineRouteConfig, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{HTTPFilters: []HTTPFilter{ + { + Name: "serverOnlyCustomFilter", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + { + Name: "serverOnlyCustomFilter2", + Filter: serverOnlyHTTPFilter{}, + Config: filterConfig{Cfg: serverOnlyCustomFilterConfig}, + }, + routerFilter, + }, + InlineRouteConfig: inlineRouteConfig, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + gotFC, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) + } + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{}), cmpOpts) { + t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) + } + }) + } +} + +// TestNewFilterChainImpl_Success_SecurityConfig verifies cases where the +// security configuration in the filter chain contains valid data. 
+func (s) TestNewFilterChainImpl_Success_SecurityConfig(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + tests := []struct { + desc string + lis *v3listenerpb.Listener + wantFC *FilterChainManager + }{ + { + desc: "empty transport socket", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Filters: emptyValidNetworkFilters, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + { + desc: "no validation context", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + 
TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + }, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + { + desc: "validation context with certificate provider", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + }, + }, + }), + }, + }, + Filters: 
emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Name: "default-filter-chain-1", + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultRootPluginInstance", + CertificateName: "defaultRootCertName", + }, + }, + }, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + RootInstanceName: "rootPluginInstance", + RootCertName: "rootCertName", + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + RequireClientCert: true, + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + RootInstanceName: "defaultRootPluginInstance", + RootCertName: "defaultRootCertName", + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + RequireClientCert: true, + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + } + + for _, test := range 
tests { + t.Run(test.desc, func(t *testing.T) { + gotFC, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) + } + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{}), cmpopts.EquateEmpty()) { + t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) + } + }) + } +} + +// TestNewFilterChainImpl_Success_UnsupportedMatchFields verifies cases where +// there are multiple filter chains, and one of them is valid while the other +// contains unsupported match fields. These configurations should lead to +// success at config validation time and the filter chains which contains +// unsupported match fields will be skipped at lookup time. +func (s) TestNewFilterChainImpl_Success_UnsupportedMatchFields(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + unspecifiedEntry := &destPrefixEntry{ + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + } + + tests := []struct { + desc string + lis *v3listenerpb.Listener + wantFC *FilterChainManager + }{ + { + desc: "unsupported destination port", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "good-chain", + Filters: emptyValidNetworkFilters, + }, + { + Name: "unsupported-destination-port", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + DestinationPort: &wrapperspb.UInt32Value{Value: 666}, + }, + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, + }, + 
wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: unspecifiedEntry, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + { + desc: "unsupported server names", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "good-chain", + Filters: emptyValidNetworkFilters, + }, + { + Name: "unsupported-server-names", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + ServerNames: []string{"example-server"}, + }, + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: unspecifiedEntry, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + { + desc: "unsupported transport protocol", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "good-chain", + Filters: emptyValidNetworkFilters, + }, + { + Name: "unsupported-transport-protocol", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + TransportProtocol: "tls", + }, + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: unspecifiedEntry, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + { + desc: "unsupported application protocol", + lis: 
&v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "good-chain", + Filters: emptyValidNetworkFilters, + }, + { + Name: "unsupported-application-protocol", + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + ApplicationProtocols: []string{"h2"}, + }, + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: unspecifiedEntry, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotFC, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) + } + if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{}), cmpopts.EquateEmpty()) { + t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) + } + }) + } +} + +// TestNewFilterChainImpl_Success_AllCombinations verifies different +// combinations of the supported match criteria. +func (s) TestNewFilterChainImpl_Success_AllCombinations(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + tests := []struct { + desc string + lis *v3listenerpb.Listener + wantFC *FilterChainManager + }{ + { + desc: "multiple destination prefixes", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + // Unspecified destination prefix. + FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + Filters: emptyValidNetworkFilters, + }, + { + // v4 wildcard destination prefix. 
+ FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("0.0.0.0", 0)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + Filters: emptyValidNetworkFilters, + }, + { + // v6 wildcard destination prefix. + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("::", 0)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}}, + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + "0.0.0.0/0": { + net: ipNetFromCIDR("0.0.0.0/0"), + srcTypeArr: [3]*sourcePrefixes{ + nil, + nil, + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + "::/0": { + net: ipNetFromCIDR("::/0"), + srcTypeArr: [3]*sourcePrefixes{ + nil, + nil, + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, 
+ }, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + "10.0.0.0/8": { + net: ipNetFromCIDR("10.0.0.0/8"), + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + { + desc: "multiple source types", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + nil, + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + srcTypeArr: [3]*sourcePrefixes{ + nil, + nil, + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + 
InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + { + desc: "multiple source prefixes", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + }, + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "10.0.0.0/8": { + net: ipNetFromCIDR("10.0.0.0/8"), + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.0.0/16"), + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + { + desc: "multiple source ports", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3}}, + Filters: 
emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + SourcePorts: []uint32{1, 2, 3}, + }, + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 1: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + 2: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + 3: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + srcTypeArr: [3]*sourcePrefixes{ + nil, + nil, + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.0.0/16"), + srcPortMap: map[int]*FilterChain{ + 1: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + 2: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + 3: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + { + desc: "some chains have unsupported fields", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: 
[]*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}, + TransportProtocol: "raw_buffer", + }, + Filters: emptyValidNetworkFilters, + }, + { + // This chain will be dropped in favor of the above + // filter chain because they both have the same + // destination prefix, but this one has an empty + // transport protocol while the above chain has the more + // preferred "raw_buffer". + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 8)}, + TransportProtocol: "", + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("10.0.0.0", 16)}, + }, + Filters: emptyValidNetworkFilters, + }, + { + // This chain will be dropped for unsupported server + // names. + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.1", 32)}, + ServerNames: []string{"foo", "bar"}, + }, + Filters: emptyValidNetworkFilters, + }, + { + // This chain will be dropped for unsupported transport + // protocol. + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.2", 32)}, + TransportProtocol: "not-raw-buffer", + }, + Filters: emptyValidNetworkFilters, + }, + { + // This chain will be dropped for unsupported + // application protocol. 
+ FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.3", 32)}, + ApplicationProtocols: []string{"h2"}, + }, + Filters: emptyValidNetworkFilters, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{Filters: emptyValidNetworkFilters}, + }, + wantFC: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + "192.168.0.0/16": { + net: ipNetFromCIDR("192.168.2.2/16"), + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + "10.0.0.0/8": { + net: ipNetFromCIDR("10.0.0.0/8"), + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + "192.168.100.1/32": { + net: ipNetFromCIDR("192.168.100.1/32"), + srcTypeArr: [3]*sourcePrefixes{}, + }, + "192.168.100.2/32": { + net: ipNetFromCIDR("192.168.100.2/32"), + srcTypeArr: [3]*sourcePrefixes{}, + }, + "192.168.100.3/32": { + net: ipNetFromCIDR("192.168.100.3/32"), + srcTypeArr: [3]*sourcePrefixes{}, + }, + }, + def: &FilterChain{ + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + gotFC, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() returned err: %v, wantErr: nil", err) + } 
+ if !cmp.Equal(gotFC, test.wantFC, cmp.AllowUnexported(FilterChainManager{}, destPrefixEntry{}, sourcePrefixes{}, sourcePrefixEntry{})) { + t.Fatalf("NewFilterChainManager() returned %+v, want: %+v", gotFC, test.wantFC) + } + }) + } +} + +func (s) TestLookup_Failures(t *testing.T) { + tests := []struct { + desc string + lis *v3listenerpb.Listener + params FilterChainLookupParams + wantErr string + }{ + { + desc: "no destination prefix match", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + Filters: emptyValidNetworkFilters, + }, + }, + }, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(10, 1, 1, 1), + }, + wantErr: "no matching filter chain based on destination prefix match", + }, + { + desc: "no source type match", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 2), + }, + wantErr: "no matching filter chain based on source type match", + }, + { + desc: "no source prefix match", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 24)}, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 
100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + }, + wantErr: "no matching filter chain after all match criteria", + }, + { + desc: "multiple matching filter chains", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3}}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}, + SourcePorts: []uint32{1}, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + params: FilterChainLookupParams{ + // IsUnspecified is not set. This means that the destination + // prefix matchers will be ignored. + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + SourcePort: 1, + }, + wantErr: "multiple matching filter chains", + }, + { + desc: "no default filter chain", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3}}, + Filters: emptyValidNetworkFilters, + }, + }, + }, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + SourcePort: 80, + }, + wantErr: "no matching filter chain after all match criteria", + }, + { + desc: "most specific match dropped for unsupported field", + lis: &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + // This chain will be picked in the destination prefix + // stage, but will be dropped at the server names stage. 
+ FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.1", 32)}, + ServerNames: []string{"foo"}, + }, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.100.0", 16)}, + }, + Filters: emptyValidNetworkFilters, + }, + }, + }, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + SourcePort: 80, + }, + wantErr: "no matching filter chain based on source type match", + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + fci, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() failed: %v", err) + } + fc, err := fci.Lookup(test.params) + if err == nil || !strings.Contains(err.Error(), test.wantErr) { + t.Fatalf("FilterChainManager.Lookup(%v) = (%v, %v) want (nil, %s)", test.params, fc, err, test.wantErr) + } + }) + } +} + +func (s) TestLookup_Successes(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + lisWithDefaultChain := &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{InstanceName: "instance1"}, + }, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + }, + // A default filter chain with an empty transport 
socket. + DefaultFilterChain: &v3listenerpb.FilterChain{ + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{InstanceName: "default"}, + }, + }), + }, + }, + Filters: emptyValidNetworkFilters, + }, + } + lisWithoutDefaultChain := &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: transportSocketWithInstanceName("unspecified-dest-and-source-prefix"), + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("0.0.0.0", 0)}, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("0.0.0.0", 0)}, + }, + TransportSocket: transportSocketWithInstanceName("wildcard-prefixes-v4"), + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("::", 0)}, + }, + TransportSocket: transportSocketWithInstanceName("wildcard-source-prefix-v6"), + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 16)}}, + TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-unspecified-source-type"), + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 24)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-specific-source-type"), + Filters: 
emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 24)}, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.92.1", 24)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + }, + TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-specific-source-type-specific-source-prefix"), + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.1.1", 24)}, + SourcePrefixRanges: []*v3corepb.CidrRange{cidrRangeFromAddressAndPrefixLen("192.168.92.1", 24)}, + SourceType: v3listenerpb.FilterChainMatch_EXTERNAL, + SourcePorts: []uint32{80}, + }, + TransportSocket: transportSocketWithInstanceName("specific-destination-prefix-specific-source-type-specific-source-prefix-specific-source-port"), + Filters: emptyValidNetworkFilters, + }, + }, + } + + tests := []struct { + desc string + lis *v3listenerpb.Listener + params FilterChainLookupParams + wantFC *FilterChain + }{ + { + desc: "default filter chain", + lis: lisWithDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(10, 1, 1, 1), + }, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "default"}, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + { + desc: "unspecified destination match", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.ParseIP("2001:68::db8"), + SourceAddr: net.IPv4(10, 1, 1, 1), + SourcePort: 1, + }, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "unspecified-dest-and-source-prefix"}, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + { + desc: "wildcard destination match v4", + 
lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(10, 1, 1, 1), + SourceAddr: net.IPv4(10, 1, 1, 1), + SourcePort: 1, + }, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard-prefixes-v4"}, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + { + desc: "wildcard source match v6", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.ParseIP("2001:68::1"), + SourceAddr: net.ParseIP("2001:68::2"), + SourcePort: 1, + }, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "wildcard-source-prefix-v6"}, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + { + desc: "specific destination and wildcard source type match", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 100, 1), + SourceAddr: net.IPv4(192, 168, 100, 1), + SourcePort: 80, + }, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-unspecified-source-type"}, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + { + desc: "specific destination and source type match", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 1, 1), + SourceAddr: net.IPv4(10, 1, 1, 1), + SourcePort: 80, + }, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type"}, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + { + desc: "specific destination source type and source prefix", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 1, 1), + SourceAddr: net.IPv4(192, 168, 92, 100), + 
SourcePort: 70, + }, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type-specific-source-prefix"}, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + { + desc: "specific destination source type source prefix and source port", + lis: lisWithoutDefaultChain, + params: FilterChainLookupParams{ + IsUnspecifiedListener: true, + DestAddr: net.IPv4(192, 168, 1, 1), + SourceAddr: net.IPv4(192, 168, 92, 100), + SourcePort: 80, + }, + wantFC: &FilterChain{ + SecurityCfg: &SecurityConfig{IdentityInstanceName: "specific-destination-prefix-specific-source-type-specific-source-prefix-specific-source-port"}, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + fci, err := NewFilterChainManager(test.lis) + if err != nil { + t.Fatalf("NewFilterChainManager() failed: %v", err) + } + gotFC, err := fci.Lookup(test.params) + if err != nil { + t.Fatalf("FilterChainManager.Lookup(%v) failed: %v", test.params, err) + } + if !cmp.Equal(gotFC, test.wantFC, cmpopts.EquateEmpty()) { + t.Fatalf("FilterChainManager.Lookup(%v) = %v, want %v", test.params, gotFC, test.wantFC) + } + }) + } +} + +type filterCfg struct { + httpfilter.FilterConfig + // Level is what differentiates top level filters ("top level") vs. second + // level ("virtual host level"), and third level ("route level"). 
+ level string +} + +type filterBuilder struct { + httpfilter.Filter +} + +var _ httpfilter.ServerInterceptorBuilder = &filterBuilder{} + +func (fb *filterBuilder) BuildServerInterceptor(config httpfilter.FilterConfig, override httpfilter.FilterConfig) (iresolver.ServerInterceptor, error) { + var level string + level = config.(filterCfg).level + + if override != nil { + level = override.(filterCfg).level + } + return &serverInterceptor{level: level}, nil +} + +type serverInterceptor struct { + level string +} + +func (si *serverInterceptor) AllowRPC(context.Context) error { + return errors.New(si.level) +} + +func (s) TestHTTPFilterInstantiation(t *testing.T) { + tests := []struct { + name string + filters []HTTPFilter + routeConfig RouteConfigUpdate + // A list of strings which will be built from iterating through the + // filters ["top level", "vh level", "route level", "route level"...] + // wantErrs is the list of error strings that will be constructed from + // the deterministic iteration through the vh list and route list. The + // error string will be determined by the level of config that the + // filter builder receives (i.e. top level, vs. virtual host level vs. + // route level). 
+ wantErrs []string + }{ + { + name: "one http filter no overrides", + filters: []HTTPFilter{ + {Name: "server-interceptor", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + }, + routeConfig: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + }, + }, + }, + }}, + wantErrs: []string{topLevel}, + }, + { + name: "one http filter vh override", + filters: []HTTPFilter{ + {Name: "server-interceptor", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + }, + routeConfig: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + }, + }, + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor": filterCfg{level: vhLevel}, + }, + }, + }}, + wantErrs: []string{vhLevel}, + }, + { + name: "one http filter route override", + filters: []HTTPFilter{ + {Name: "server-interceptor", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + }, + routeConfig: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor": filterCfg{level: rLevel}, + }, + }, + }, + }, + }}, + wantErrs: []string{rLevel}, + }, + // This tests the scenario where there are three http filters, and one + // gets overridden by route and one by virtual host. 
+ { + name: "three http filters vh override route override", + filters: []HTTPFilter{ + {Name: "server-interceptor1", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + {Name: "server-interceptor2", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + {Name: "server-interceptor3", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + }, + routeConfig: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor3": filterCfg{level: rLevel}, + }, + }, + }, + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor2": filterCfg{level: vhLevel}, + }, + }, + }}, + wantErrs: []string{topLevel, vhLevel, rLevel}, + }, + // This tests the scenario where there are three http filters, and two + // virtual hosts with different vh + route overrides for each virtual + // host. + { + name: "three http filters two vh", + filters: []HTTPFilter{ + {Name: "server-interceptor1", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + {Name: "server-interceptor2", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + {Name: "server-interceptor3", Filter: &filterBuilder{}, Config: filterCfg{level: topLevel}}, + }, + routeConfig: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor3": filterCfg{level: rLevel}, + }, + }, + }, + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor2": filterCfg{level: vhLevel}, + }, + }, + { + Domains: []string{"target"}, + Routes: []*Route{{ + Prefix: newStringP("1"), + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor1": filterCfg{level: rLevel}, + "server-interceptor2": filterCfg{level: 
rLevel}, + }, + }, + }, + HTTPFilterConfigOverride: map[string]httpfilter.FilterConfig{ + "server-interceptor2": filterCfg{level: vhLevel}, + "server-interceptor3": filterCfg{level: vhLevel}, + }, + }, + }}, + wantErrs: []string{topLevel, vhLevel, rLevel, rLevel, rLevel, vhLevel}, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fc := FilterChain{ + HTTPFilters: test.filters, + } + vhswi, err := fc.ConstructUsableRouteConfiguration(test.routeConfig) + if err != nil { + t.Fatalf("Error constructing usable route configuration: %v", err) + } + // Build out list of errors by iterating through the virtual hosts and routes, + // and running the filters in route configurations. + var errs []string + for _, vh := range vhswi { + for _, r := range vh.Routes { + for _, int := range r.Interceptors { + errs = append(errs, int.AllowRPC(context.Background()).Error()) + } + } + } + if !cmp.Equal(errs, test.wantErrs) { + t.Fatalf("List of errors %v, want %v", errs, test.wantErrs) + } + }) + } +} + +// The Equal() methods defined below help with using cmp.Equal() on these types +// which contain all unexported fields. + +func (fci *FilterChainManager) Equal(other *FilterChainManager) bool { + if (fci == nil) != (other == nil) { + return false + } + if fci == nil { + return true + } + switch { + case !cmp.Equal(fci.dstPrefixMap, other.dstPrefixMap, cmpopts.EquateEmpty()): + return false + // TODO: Support comparing dstPrefixes slice? 
+ case !cmp.Equal(fci.def, other.def, cmpopts.EquateEmpty(), protocmp.Transform()): + return false + case !cmp.Equal(fci.RouteConfigNames, other.RouteConfigNames, cmpopts.EquateEmpty()): + return false + } + return true +} + +func (dpe *destPrefixEntry) Equal(other *destPrefixEntry) bool { + if (dpe == nil) != (other == nil) { + return false + } + if dpe == nil { + return true + } + if !cmp.Equal(dpe.net, other.net) { + return false + } + for i, st := range dpe.srcTypeArr { + if !cmp.Equal(st, other.srcTypeArr[i], cmpopts.EquateEmpty()) { + return false + } + } + return true +} + +func (sp *sourcePrefixes) Equal(other *sourcePrefixes) bool { + if (sp == nil) != (other == nil) { + return false + } + if sp == nil { + return true + } + // TODO: Support comparing srcPrefixes slice? + return cmp.Equal(sp.srcPrefixMap, other.srcPrefixMap, cmpopts.EquateEmpty()) +} + +func (spe *sourcePrefixEntry) Equal(other *sourcePrefixEntry) bool { + if (spe == nil) != (other == nil) { + return false + } + if spe == nil { + return true + } + switch { + case !cmp.Equal(spe.net, other.net): + return false + case !cmp.Equal(spe.srcPortMap, other.srcPortMap, cmpopts.EquateEmpty(), protocmp.Transform()): + return false + } + return true +} + +// The String() methods defined below help with debugging test failures as the +// regular %v or %+v formatting directives do not expands pointer fields inside +// structs, and these types have a lot of pointers pointing to other structs. 
+func (fci *FilterChainManager) String() string { + if fci == nil { + return "" + } + + var sb strings.Builder + if fci.dstPrefixMap != nil { + sb.WriteString("destination_prefix_map: map {\n") + for k, v := range fci.dstPrefixMap { + sb.WriteString(fmt.Sprintf("%q: %v\n", k, v)) + } + sb.WriteString("}\n") + } + if fci.dstPrefixes != nil { + sb.WriteString("destination_prefixes: [") + for _, p := range fci.dstPrefixes { + sb.WriteString(fmt.Sprintf("%v ", p)) + } + sb.WriteString("]") + } + if fci.def != nil { + sb.WriteString(fmt.Sprintf("default_filter_chain: %+v ", fci.def)) + } + return sb.String() +} + +func (dpe *destPrefixEntry) String() string { + if dpe == nil { + return "" + } + var sb strings.Builder + if dpe.net != nil { + sb.WriteString(fmt.Sprintf("destination_prefix: %s ", dpe.net.String())) + } + sb.WriteString("source_types_array: [") + for _, st := range dpe.srcTypeArr { + sb.WriteString(fmt.Sprintf("%v ", st)) + } + sb.WriteString("]") + return sb.String() +} + +func (sp *sourcePrefixes) String() string { + if sp == nil { + return "" + } + var sb strings.Builder + if sp.srcPrefixMap != nil { + sb.WriteString("source_prefix_map: map {") + for k, v := range sp.srcPrefixMap { + sb.WriteString(fmt.Sprintf("%q: %v ", k, v)) + } + sb.WriteString("}") + } + if sp.srcPrefixes != nil { + sb.WriteString("source_prefixes: [") + for _, p := range sp.srcPrefixes { + sb.WriteString(fmt.Sprintf("%v ", p)) + } + sb.WriteString("]") + } + return sb.String() +} + +func (spe *sourcePrefixEntry) String() string { + if spe == nil { + return "" + } + var sb strings.Builder + if spe.net != nil { + sb.WriteString(fmt.Sprintf("source_prefix: %s ", spe.net.String())) + } + if spe.srcPortMap != nil { + sb.WriteString("source_ports_map: map {") + for k, v := range spe.srcPortMap { + sb.WriteString(fmt.Sprintf("%d: %+v ", k, v)) + } + sb.WriteString("}") + } + return sb.String() +} + +func (f *FilterChain) String() string { + if f == nil || f.SecurityCfg == nil { + return 
"" + } + return fmt.Sprintf("security_config: %v", f.SecurityCfg) +} + +func ipNetFromCIDR(cidr string) *net.IPNet { + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + panic(err) + } + return ipnet +} + +func transportSocketWithInstanceName(name string) *v3corepb.TransportSocket { + return &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{InstanceName: name}, + }, + }), + }, + } +} + +func cidrRangeFromAddressAndPrefixLen(address string, len int) *v3corepb.CidrRange { + return &v3corepb.CidrRange{ + AddressPrefix: address, + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(len), + }, + } +} diff --git a/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/xds/internal/xdsclient/xdsresource/listener_resource_type.go new file mode 100644 index 000000000000..0aff941389ec --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -0,0 +1,186 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "fmt" + + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // ListenerResourceTypeName represents the transport agnostic name for the + // listener resource. + ListenerResourceTypeName = "ListenerResource" +) + +var ( + // Compile time interface checks. + _ Type = listenerResourceType{} + + // Singleton instantiation of the resource type implementation. + listenerType = listenerResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3ListenerURL, + typeName: ListenerResourceTypeName, + allResourcesRequiredInSotW: true, + }, + } +) + +// listenerResourceType provides the resource-type specific functionality for a +// Listener resource. +// +// Implements the Type interface. +type listenerResourceType struct { + resourceTypeState +} + +func securityConfigValidator(bc *bootstrap.Config, sc *SecurityConfig) error { + if sc == nil { + return nil + } + if sc.IdentityInstanceName != "" { + if _, ok := bc.CertProviderConfigs[sc.IdentityInstanceName]; !ok { + return fmt.Errorf("identitiy certificate provider instance name %q missing in bootstrap configuration", sc.IdentityInstanceName) + } + } + if sc.RootInstanceName != "" { + if _, ok := bc.CertProviderConfigs[sc.RootInstanceName]; !ok { + return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) + } + } + return nil +} + +func listenerValidator(bc *bootstrap.Config, lis ListenerUpdate) error { + if lis.InboundListenerCfg == nil || lis.InboundListenerCfg.FilterChains == nil { + return nil + } + return lis.InboundListenerCfg.FilterChains.Validate(func(fc *FilterChain) error { + if fc == nil { + return nil + } + return securityConfigValidator(bc, fc.SecurityCfg) + }) +} + +// Decode 
deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (listenerResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, listener, err := unmarshalListenerResource(resource) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: ListenerUpdate{}}}, err + } + + // Perform extra validation here. + if err := listenerValidator(opts.BootstrapConfig, listener); err != nil { + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: ListenerUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &ListenerResourceData{Resource: listener}}, nil + +} + +// ListenerResourceData wraps the configuration of a Listener resource as +// received from the management server. +// +// Implements the ResourceData interface. +type ListenerResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource ListenerUpdate +} + +// Equal returns true if other is equal to l. +func (l *ListenerResourceData) Equal(other ResourceData) bool { + if l == nil && other == nil { + return true + } + if (l == nil) != (other == nil) { + return false + } + return proto.Equal(l.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. +func (l *ListenerResourceData) ToJSON() string { + return pretty.ToJSON(l.Resource) +} + +// Raw returns the underlying raw protobuf form of the listener resource. +func (l *ListenerResourceData) Raw() *anypb.Any { + return l.Resource.Raw +} + +// ListenerWatcher wraps the callbacks to be invoked for different +// events corresponding to the listener resource being watched. 
+type ListenerWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*ListenerResourceData) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist() +} + +type delegatingListenerWatcher struct { + watcher ListenerWatcher +} + +func (d *delegatingListenerWatcher) OnUpdate(data ResourceData) { + l := data.(*ListenerResourceData) + d.watcher.OnUpdate(l) +} + +func (d *delegatingListenerWatcher) OnError(err error) { + d.watcher.OnError(err) +} + +func (d *delegatingListenerWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() +} + +// WatchListener uses xDS to discover the configuration associated with the +// provided listener resource name. +func WatchListener(p Producer, name string, w ListenerWatcher) (cancel func()) { + delegator := &delegatingListenerWatcher{watcher: w} + return p.WatchResource(listenerType, name, delegator) +} diff --git a/xds/internal/xdsclient/xdsresource/logging.go b/xds/internal/xdsclient/xdsresource/logging.go new file mode 100644 index 000000000000..62bcb016ba25 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/logging.go @@ -0,0 +1,28 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsresource + +import ( + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +const prefix = "[xds-resource] " + +var logger = internalgrpclog.NewPrefixLogger(grpclog.Component("xds"), prefix) diff --git a/xds/internal/xdsclient/xdsresource/matcher.go b/xds/internal/xdsclient/xdsresource/matcher.go new file mode 100644 index 000000000000..77aa85b68e58 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/matcher.go @@ -0,0 +1,280 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/metadata" +) + +// RouteToMatcher converts a route to a Matcher to match incoming RPC's against. 
+func RouteToMatcher(r *Route) (*CompositeMatcher, error) { + var pm pathMatcher + switch { + case r.Regex != nil: + pm = newPathRegexMatcher(r.Regex) + case r.Path != nil: + pm = newPathExactMatcher(*r.Path, r.CaseInsensitive) + case r.Prefix != nil: + pm = newPathPrefixMatcher(*r.Prefix, r.CaseInsensitive) + default: + return nil, fmt.Errorf("illegal route: missing path_matcher") + } + + headerMatchers := make([]matcher.HeaderMatcher, 0, len(r.Headers)) + for _, h := range r.Headers { + var matcherT matcher.HeaderMatcher + invert := h.InvertMatch != nil && *h.InvertMatch + switch { + case h.ExactMatch != nil && *h.ExactMatch != "": + matcherT = matcher.NewHeaderExactMatcher(h.Name, *h.ExactMatch, invert) + case h.RegexMatch != nil: + matcherT = matcher.NewHeaderRegexMatcher(h.Name, h.RegexMatch, invert) + case h.PrefixMatch != nil && *h.PrefixMatch != "": + matcherT = matcher.NewHeaderPrefixMatcher(h.Name, *h.PrefixMatch, invert) + case h.SuffixMatch != nil && *h.SuffixMatch != "": + matcherT = matcher.NewHeaderSuffixMatcher(h.Name, *h.SuffixMatch, invert) + case h.RangeMatch != nil: + matcherT = matcher.NewHeaderRangeMatcher(h.Name, h.RangeMatch.Start, h.RangeMatch.End, invert) + case h.PresentMatch != nil: + matcherT = matcher.NewHeaderPresentMatcher(h.Name, *h.PresentMatch, invert) + case h.StringMatch != nil: + matcherT = matcher.NewHeaderStringMatcher(h.Name, *h.StringMatch, invert) + default: + return nil, fmt.Errorf("illegal route: missing header_match_specifier") + } + headerMatchers = append(headerMatchers, matcherT) + } + + var fractionMatcher *fractionMatcher + if r.Fraction != nil { + fractionMatcher = newFractionMatcher(*r.Fraction) + } + return newCompositeMatcher(pm, headerMatchers, fractionMatcher), nil +} + +// CompositeMatcher is a matcher that holds onto many matchers and aggregates +// the matching results. 
+type CompositeMatcher struct { + pm pathMatcher + hms []matcher.HeaderMatcher + fm *fractionMatcher +} + +func newCompositeMatcher(pm pathMatcher, hms []matcher.HeaderMatcher, fm *fractionMatcher) *CompositeMatcher { + return &CompositeMatcher{pm: pm, hms: hms, fm: fm} +} + +// Match returns true if all matchers return true. +func (a *CompositeMatcher) Match(info iresolver.RPCInfo) bool { + if a.pm != nil && !a.pm.match(info.Method) { + return false + } + + // Call headerMatchers even if md is nil, because routes may match + // non-presence of some headers. + var md metadata.MD + if info.Context != nil { + md, _ = metadata.FromOutgoingContext(info.Context) + if extraMD, ok := grpcutil.ExtraMetadata(info.Context); ok { + md = metadata.Join(md, extraMD) + // Remove all binary headers. They are hard to match with. May need + // to add back if asked by users. + for k := range md { + if strings.HasSuffix(k, "-bin") { + delete(md, k) + } + } + } + } + for _, m := range a.hms { + if !m.Match(md) { + return false + } + } + + if a.fm != nil && !a.fm.match() { + return false + } + return true +} + +func (a *CompositeMatcher) String() string { + var ret string + if a.pm != nil { + ret += a.pm.String() + } + for _, m := range a.hms { + ret += m.String() + } + if a.fm != nil { + ret += a.fm.String() + } + return ret +} + +type fractionMatcher struct { + fraction int64 // real fraction is fraction/1,000,000. +} + +func newFractionMatcher(fraction uint32) *fractionMatcher { + return &fractionMatcher{fraction: int64(fraction)} +} + +// RandInt63n overwrites grpcrand for control in tests. 
+var RandInt63n = grpcrand.Int63n + +func (fm *fractionMatcher) match() bool { + t := RandInt63n(1000000) + return t <= fm.fraction +} + +func (fm *fractionMatcher) String() string { + return fmt.Sprintf("fraction:%v", fm.fraction) +} + +type domainMatchType int + +const ( + domainMatchTypeInvalid domainMatchType = iota + domainMatchTypeUniversal + domainMatchTypePrefix + domainMatchTypeSuffix + domainMatchTypeExact +) + +// Exact > Suffix > Prefix > Universal > Invalid. +func (t domainMatchType) betterThan(b domainMatchType) bool { + return t > b +} + +func matchTypeForDomain(d string) domainMatchType { + if d == "" { + return domainMatchTypeInvalid + } + if d == "*" { + return domainMatchTypeUniversal + } + if strings.HasPrefix(d, "*") { + return domainMatchTypeSuffix + } + if strings.HasSuffix(d, "*") { + return domainMatchTypePrefix + } + if strings.Contains(d, "*") { + return domainMatchTypeInvalid + } + return domainMatchTypeExact +} + +func match(domain, host string) (domainMatchType, bool) { + switch typ := matchTypeForDomain(domain); typ { + case domainMatchTypeInvalid: + return typ, false + case domainMatchTypeUniversal: + return typ, true + case domainMatchTypePrefix: + // abc.* + return typ, strings.HasPrefix(host, strings.TrimSuffix(domain, "*")) + case domainMatchTypeSuffix: + // *.123 + return typ, strings.HasSuffix(host, strings.TrimPrefix(domain, "*")) + case domainMatchTypeExact: + return typ, domain == host + default: + return domainMatchTypeInvalid, false + } +} + +// FindBestMatchingVirtualHost returns the virtual host whose domains field best +// matches host +// +// The domains field support 4 different matching pattern types: +// +// - Exact match +// - Suffix match (e.g. “*ABC”) +// - Prefix match (e.g. “ABC*) +// - Universal match (e.g. “*”) +// +// The best match is defined as: +// - A match is better if it’s matching pattern type is better. +// * Exact match > suffix match > prefix match > universal match. 
+// +// - If two matches are of the same pattern type, the longer match is +// better. +// * This is to compare the length of the matching pattern, e.g. “*ABCDE” > +// “*ABC” +func FindBestMatchingVirtualHost(host string, vHosts []*VirtualHost) *VirtualHost { // Maybe move this crap to client + var ( + matchVh *VirtualHost + matchType = domainMatchTypeInvalid + matchLen int + ) + for _, vh := range vHosts { + for _, domain := range vh.Domains { + typ, matched := match(domain, host) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match. + continue + } + matchVh = vh + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} + +// FindBestMatchingVirtualHostServer returns the virtual host whose domains field best +// matches authority. +func FindBestMatchingVirtualHostServer(authority string, vHosts []VirtualHostWithInterceptors) *VirtualHostWithInterceptors { + var ( + matchVh *VirtualHostWithInterceptors + matchType = domainMatchTypeInvalid + matchLen int + ) + for _, vh := range vHosts { + for _, domain := range vh.Domains { + typ, matched := match(domain, authority) + if typ == domainMatchTypeInvalid { + // The rds response is invalid. + return nil + } + if matchType.betterThan(typ) || matchType == typ && matchLen >= len(domain) || !matched { + // The previous match has better type, or the previous match has + // better length, or this domain isn't a match. 
+ continue + } + matchVh = &vh + matchType = typ + matchLen = len(domain) + } + } + return matchVh +} diff --git a/xds/internal/resolver/matcher_path.go b/xds/internal/xdsclient/xdsresource/matcher_path.go similarity index 94% rename from xds/internal/resolver/matcher_path.go rename to xds/internal/xdsclient/xdsresource/matcher_path.go index 011d1a94c49c..da487e20c58e 100644 --- a/xds/internal/resolver/matcher_path.go +++ b/xds/internal/xdsclient/xdsresource/matcher_path.go @@ -13,17 +13,18 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -package resolver +package xdsresource import ( "regexp" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) -type pathMatcherInterface interface { +type pathMatcher interface { match(path string) bool String() string } @@ -93,7 +94,7 @@ func newPathRegexMatcher(re *regexp.Regexp) *pathRegexMatcher { } func (prm *pathRegexMatcher) match(path string) bool { - return prm.re.MatchString(path) + return grpcutil.FullMatchWithRegex(prm.re, path) } func (prm *pathRegexMatcher) String() string { diff --git a/xds/internal/resolver/matcher_path_test.go b/xds/internal/xdsclient/xdsresource/matcher_path_test.go similarity index 88% rename from xds/internal/resolver/matcher_path_test.go rename to xds/internal/xdsclient/xdsresource/matcher_path_test.go index 263a049108e4..507cf15bed85 100644 --- a/xds/internal/resolver/matcher_path_test.go +++ b/xds/internal/xdsclient/xdsresource/matcher_path_test.go @@ -13,17 +13,16 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * */ -package resolver +package xdsresource import ( "regexp" "testing" ) -func TestPathFullMatcherMatch(t *testing.T) { +func (s) TestPathFullMatcherMatch(t *testing.T) { tests := []struct { name string fullPath string @@ -47,7 +46,7 @@ func TestPathFullMatcherMatch(t *testing.T) { } } -func TestPathPrefixMatcherMatch(t *testing.T) { +func (s) TestPathPrefixMatcherMatch(t *testing.T) { tests := []struct { name string prefix string @@ -71,7 +70,7 @@ func TestPathPrefixMatcherMatch(t *testing.T) { } } -func TestPathRegexMatcherMatch(t *testing.T) { +func (s) TestPathRegexMatcherMatch(t *testing.T) { tests := []struct { name string regexPath string @@ -80,6 +79,8 @@ func TestPathRegexMatcherMatch(t *testing.T) { }{ {name: "match", regexPath: "^/s+/m.*$", path: "/sss/me", want: true}, {name: "not match", regexPath: "^/s+/m*$", path: "/sss/b", want: false}, + {name: "no match because only part of path matches with regex", regexPath: "^a+$", path: "ab", want: false}, + {name: "match because full path matches with regex", regexPath: "^a+$", path: "aa", want: true}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/xds/internal/resolver/matcher_test.go b/xds/internal/xdsclient/xdsresource/matcher_test.go similarity index 53% rename from xds/internal/resolver/matcher_test.go rename to xds/internal/xdsclient/xdsresource/matcher_test.go index 7657b87bf45f..2746e58e6c77 100644 --- a/xds/internal/resolver/matcher_test.go +++ b/xds/internal/xdsclient/xdsresource/matcher_test.go @@ -13,10 +13,9 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * */ -package resolver +package xdsresource import ( "context" @@ -25,21 +24,22 @@ import ( "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/xds/matcher" "google.golang.org/grpc/metadata" ) -func TestAndMatcherMatch(t *testing.T) { +func (s) TestAndMatcherMatch(t *testing.T) { tests := []struct { name string - pm pathMatcherInterface - hm headerMatcherInterface + pm pathMatcher + hm matcher.HeaderMatcher info iresolver.RPCInfo want bool }{ { name: "both match", pm: newPathExactMatcher("/a/b", false), - hm: newHeaderExactMatcher("th", "tv"), + hm: matcher.NewHeaderExactMatcher("th", "tv", false), info: iresolver.RPCInfo{ Method: "/a/b", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -49,7 +49,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "both match with path case insensitive", pm: newPathExactMatcher("/A/B", true), - hm: newHeaderExactMatcher("th", "tv"), + hm: matcher.NewHeaderExactMatcher("th", "tv", false), info: iresolver.RPCInfo{ Method: "/a/b", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -59,7 +59,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "only one match", pm: newPathExactMatcher("/a/b", false), - hm: newHeaderExactMatcher("th", "tv"), + hm: matcher.NewHeaderExactMatcher("th", "tv", false), info: iresolver.RPCInfo{ Method: "/z/y", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -69,7 +69,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "both not match", pm: newPathExactMatcher("/z/y", false), - hm: newHeaderExactMatcher("th", "abc"), + hm: matcher.NewHeaderExactMatcher("th", "abc", false), info: iresolver.RPCInfo{ Method: "/a/b", Context: metadata.NewOutgoingContext(context.Background(), metadata.Pairs("th", "tv")), @@ -79,7 +79,7 @@ func TestAndMatcherMatch(t *testing.T) { { 
name: "fake header", pm: newPathPrefixMatcher("/", false), - hm: newHeaderExactMatcher("content-type", "fake"), + hm: matcher.NewHeaderExactMatcher("content-type", "fake", false), info: iresolver.RPCInfo{ Method: "/a/b", Context: grpcutil.WithExtraMetadata(context.Background(), metadata.Pairs( @@ -91,7 +91,7 @@ func TestAndMatcherMatch(t *testing.T) { { name: "binary header", pm: newPathPrefixMatcher("/", false), - hm: newHeaderPresentMatcher("t-bin", true), + hm: matcher.NewHeaderPresentMatcher("t-bin", true, false), info: iresolver.RPCInfo{ Method: "/a/b", Context: grpcutil.WithExtraMetadata( @@ -105,23 +105,23 @@ func TestAndMatcherMatch(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - a := newCompositeMatcher(tt.pm, []headerMatcherInterface{tt.hm}, nil) - if got := a.match(tt.info); got != tt.want { + a := newCompositeMatcher(tt.pm, []matcher.HeaderMatcher{tt.hm}, nil) + if got := a.Match(tt.info); got != tt.want { t.Errorf("match() = %v, want %v", got, tt.want) } }) } } -func TestFractionMatcherMatch(t *testing.T) { +func (s) TestFractionMatcherMatch(t *testing.T) { const fraction = 500000 fm := newFractionMatcher(fraction) defer func() { - grpcrandInt63n = grpcrand.Int63n + RandInt63n = grpcrand.Int63n }() // rand > fraction, should return false. - grpcrandInt63n = func(n int64) int64 { + RandInt63n = func(n int64) int64 { return fraction + 1 } if matched := fm.match(); matched { @@ -129,7 +129,7 @@ func TestFractionMatcherMatch(t *testing.T) { } // rand == fraction, should return true. - grpcrandInt63n = func(n int64) int64 { + RandInt63n = func(n int64) int64 { return fraction } if matched := fm.match(); !matched { @@ -137,10 +137,56 @@ func TestFractionMatcherMatch(t *testing.T) { } // rand < fraction, should return true. 
- grpcrandInt63n = func(n int64) int64 { + RandInt63n = func(n int64) int64 { return fraction - 1 } if matched := fm.match(); !matched { t.Errorf("match() = %v, want match", matched) } } + +func (s) TestMatchTypeForDomain(t *testing.T) { + tests := []struct { + d string + want domainMatchType + }{ + {d: "", want: domainMatchTypeInvalid}, + {d: "*", want: domainMatchTypeUniversal}, + {d: "bar.*", want: domainMatchTypePrefix}, + {d: "*.abc.com", want: domainMatchTypeSuffix}, + {d: "foo.bar.com", want: domainMatchTypeExact}, + {d: "foo.*.com", want: domainMatchTypeInvalid}, + } + for _, tt := range tests { + if got := matchTypeForDomain(tt.d); got != tt.want { + t.Errorf("matchTypeForDomain(%q) = %v, want %v", tt.d, got, tt.want) + } + } +} + +func (s) TestMatch(t *testing.T) { + tests := []struct { + name string + domain string + host string + wantTyp domainMatchType + wantMatched bool + }{ + {name: "invalid-empty", domain: "", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false}, + {name: "invalid", domain: "a.*.b", host: "", wantTyp: domainMatchTypeInvalid, wantMatched: false}, + {name: "universal", domain: "*", host: "abc.com", wantTyp: domainMatchTypeUniversal, wantMatched: true}, + {name: "prefix-match", domain: "abc.*", host: "abc.123", wantTyp: domainMatchTypePrefix, wantMatched: true}, + {name: "prefix-no-match", domain: "abc.*", host: "abcd.123", wantTyp: domainMatchTypePrefix, wantMatched: false}, + {name: "suffix-match", domain: "*.123", host: "abc.123", wantTyp: domainMatchTypeSuffix, wantMatched: true}, + {name: "suffix-no-match", domain: "*.123", host: "abc.1234", wantTyp: domainMatchTypeSuffix, wantMatched: false}, + {name: "exact-match", domain: "foo.bar", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: true}, + {name: "exact-no-match", domain: "foo.bar.com", host: "foo.bar", wantTyp: domainMatchTypeExact, wantMatched: false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if gotTyp, gotMatched := 
match(tt.domain, tt.host); gotTyp != tt.wantTyp || gotMatched != tt.wantMatched { + t.Errorf("match() = %v, %v, want %v, %v", gotTyp, gotMatched, tt.wantTyp, tt.wantMatched) + } + }) + } +} diff --git a/xds/internal/xdsclient/xdsresource/name.go b/xds/internal/xdsclient/xdsresource/name.go new file mode 100644 index 000000000000..80c0efd37b39 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/name.go @@ -0,0 +1,133 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "net/url" + "sort" + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// FederationScheme is the scheme of a federation resource name. +const FederationScheme = "xdstp" + +// Name contains the parsed component of an xDS resource name. +// +// An xDS resource name is in the format of +// xdstp://[{authority}]/{resource type}/{id/*}?{context parameters}{#processing directive,*} +// +// See +// https://github.com/cncf/xds/blob/main/proposals/TP1-xds-transport-next.md#uri-based-xds-resource-names +// for details, and examples. +type Name struct { + Scheme string + Authority string + Type string + ID string + + ContextParams map[string]string + + processingDirective string +} + +// ParseName splits the name and returns a struct representation of the Name. +// +// If the name isn't a valid new-style xDS name, field ID is set to the input. 
+// Note that this is not an error, because we still support the old-style +// resource names (those not starting with "xdstp:"). +// +// The caller can tell if the parsing is successful by checking the returned +// Scheme. +func ParseName(name string) *Name { + if !envconfig.XDSFederation { + // Return "" scheme to use the default authority for the server. + return &Name{ID: name} + } + if !strings.Contains(name, "://") { + // Only the long form URL, with ://, is valid. + return &Name{ID: name} + } + parsed, err := url.Parse(name) + if err != nil { + return &Name{ID: name} + } + + ret := &Name{ + Scheme: parsed.Scheme, + Authority: parsed.Host, + } + split := strings.SplitN(parsed.Path, "/", 3) + if len(split) < 3 { + // Path is in the format of "/type/id". There must be at least 3 + // segments after splitting. + return &Name{ID: name} + } + ret.Type = split[1] + ret.ID = split[2] + if len(parsed.Query()) != 0 { + ret.ContextParams = make(map[string]string) + for k, vs := range parsed.Query() { + if len(vs) > 0 { + // We only keep one value of each key. Behavior for multiple values + // is undefined. + ret.ContextParams[k] = vs[0] + } + } + } + // TODO: processing directive (the part comes after "#" in the URL, stored + // in parsed.RawFragment) is kept but not processed. Add support for that + // when it's needed. + ret.processingDirective = parsed.RawFragment + return ret +} + +// String returns a canonicalized string of name. The context parameters are +// sorted by the keys. +func (n *Name) String() string { + if n.Scheme == "" { + return n.ID + } + + // Sort and build query. 
+ keys := make([]string, 0, len(n.ContextParams)) + for k := range n.ContextParams { + keys = append(keys, k) + } + sort.Strings(keys) + var pairs []string + for _, k := range keys { + pairs = append(pairs, strings.Join([]string{k, n.ContextParams[k]}, "=")) + } + rawQuery := strings.Join(pairs, "&") + + path := n.Type + if n.ID != "" { + path = "/" + path + "/" + n.ID + } + + tempURL := &url.URL{ + Scheme: n.Scheme, + Host: n.Authority, + Path: path, + RawQuery: rawQuery, + RawFragment: n.processingDirective, + } + return tempURL.String() +} diff --git a/xds/internal/xdsclient/xdsresource/name_test.go b/xds/internal/xdsclient/xdsresource/name_test.go new file mode 100644 index 000000000000..a30b437658f5 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/name_test.go @@ -0,0 +1,124 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/envconfig" +) + +func TestParseName(t *testing.T) { + tests := []struct { + name string + env bool // Whether federation env is set to true. 
+ in string + want *Name + wantStr string + }{ + { + name: "env off", + env: false, + in: "xdstp://auth/type/id", + want: &Name{ID: "xdstp://auth/type/id"}, + wantStr: "xdstp://auth/type/id", + }, + { + name: "old style name", + env: true, + in: "test-resource", + want: &Name{ID: "test-resource"}, + wantStr: "test-resource", + }, + { + name: "invalid not url", + env: true, + in: "a:/b/c", + want: &Name{ID: "a:/b/c"}, + wantStr: "a:/b/c", + }, + { + name: "invalid no resource type", + env: true, + in: "xdstp://auth/id", + want: &Name{ID: "xdstp://auth/id"}, + wantStr: "xdstp://auth/id", + }, + { + name: "valid with no authority", + env: true, + in: "xdstp:///type/id", + want: &Name{Scheme: "xdstp", Authority: "", Type: "type", ID: "id"}, + wantStr: "xdstp:///type/id", + }, + { + name: "valid no ctx params", + env: true, + in: "xdstp://auth/type/id", + want: &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id"}, + wantStr: "xdstp://auth/type/id", + }, + { + name: "valid with ctx params", + env: true, + in: "xdstp://auth/type/id?a=1&b=2", + want: &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id", ContextParams: map[string]string{"a": "1", "b": "2"}}, + wantStr: "xdstp://auth/type/id?a=1&b=2", + }, + { + name: "valid with ctx params sorted by keys", + env: true, + in: "xdstp://auth/type/id?b=2&a=1", + want: &Name{Scheme: "xdstp", Authority: "auth", Type: "type", ID: "id", ContextParams: map[string]string{"a": "1", "b": "2"}}, + wantStr: "xdstp://auth/type/id?a=1&b=2", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer func() func() { + oldEnv := envconfig.XDSFederation + envconfig.XDSFederation = tt.env + return func() { envconfig.XDSFederation = oldEnv } + }()() + got := ParseName(tt.in) + if !cmp.Equal(got, tt.want, cmpopts.IgnoreFields(Name{}, "processingDirective")) { + t.Errorf("ParseName() = %#v, want %#v", got, tt.want) + } + if gotStr := got.String(); gotStr != tt.wantStr { + t.Errorf("Name.String() = 
%s, want %s", gotStr, tt.wantStr) + } + }) + } +} + +// TestNameStringCtxParamsOrder covers the case that if two names differ only in +// context parameter __order__, the parsed name.String() has the same value. +func TestNameStringCtxParamsOrder(t *testing.T) { + const ( + a = "xdstp://auth/type/id?a=1&b=2" + b = "xdstp://auth/type/id?b=2&a=1" + ) + aParsed := ParseName(a).String() + bParsed := ParseName(b).String() + + if aParsed != bParsed { + t.Fatalf("aParsed.String() = %q, bParsed.String() = %q, want them to be the same", aParsed, bParsed) + } +} diff --git a/xds/internal/xdsclient/xdsresource/resource_type.go b/xds/internal/xdsclient/xdsresource/resource_type.go new file mode 100644 index 000000000000..f67f0ea15325 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/resource_type.go @@ -0,0 +1,164 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package xdsresource implements the xDS data model layer. +// +// Provides resource-type specific functionality to unmarshal xDS protos into +// internal data structures that contain only fields gRPC is interested in. +// These internal data structures are passed to components in the xDS stack +// (resolver/balancers/server) that have expressed interest in receiving +// updates to specific resources. 
+package xdsresource + +import ( + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" +) + +func init() { + internal.ResourceTypeMapForTesting = make(map[string]interface{}) + internal.ResourceTypeMapForTesting[version.V3ListenerURL] = listenerType + internal.ResourceTypeMapForTesting[version.V3RouteConfigURL] = routeConfigType + internal.ResourceTypeMapForTesting[version.V3ClusterURL] = clusterType + internal.ResourceTypeMapForTesting[version.V3EndpointsURL] = endpointsType +} + +// Producer contains a single method to discover resource configuration from a +// remote management server using xDS APIs. +// +// The xdsclient package provides a concrete implementation of this interface. +type Producer interface { + // WatchResource uses xDS to discover the resource associated with the + // provided resource name. The resource type implementation determines how + // xDS requests are sent out and how responses are deserialized and + // validated. Upon receipt of a response from the management server, an + // appropriate callback on the watcher is invoked. + WatchResource(rType Type, resourceName string, watcher ResourceWatcher) (cancel func()) +} + +// ResourceWatcher wraps the callbacks to be invoked for different events +// corresponding to the resource being watched. +type ResourceWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + // The ResourceData parameter needs to be type asserted to the appropriate + // type for the resource being watched. 
+ OnUpdate(ResourceData) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist() +} + +// TODO: Once the implementation is complete, rename this interface as +// ResourceType and get rid of the existing ResourceType enum. + +// Type wraps all resource-type specific functionality. Each supported resource +// type will provide an implementation of this interface. +type Type interface { + // TypeURL is the xDS type URL of this resource type for v3 transport. + TypeURL() string + + // TypeName identifies resources in a transport protocol agnostic way. This + // can be used for logging/debugging purposes, as well in cases where the + // resource type name is to be uniquely identified but the actual + // functionality provided by the resource type is not required. + // + // TODO: once Type is renamed to ResourceType, rename TypeName to + // ResourceTypeName. + TypeName() string + + // AllResourcesRequiredInSotW indicates whether this resource type requires + // that all resources be present in every SotW response from the server. If + // true, a response that does not include a previously seen resource will be + // interpreted as a deletion of that resource. + AllResourcesRequiredInSotW() bool + + // Decode deserializes and validates an xDS resource serialized inside the + // provided `Any` proto, as received from the xDS management server. + // + // If protobuf deserialization fails or resource validation fails, + // returns a non-nil error. Otherwise, returns a fully populated + // DecodeResult. 
+ Decode(*DecodeOptions, *anypb.Any) (*DecodeResult, error) +} + +// ResourceData contains the configuration data sent by the xDS management +// server, associated with the resource being watched. Every resource type must +// provide an implementation of this interface to represent the configuration +// received from the xDS management server. +type ResourceData interface { + isResourceData() + + // Equal returns true if the passed in resource data is equal to that of the + // receiver. + Equal(ResourceData) bool + + // ToJSON returns a JSON string representation of the resource data. + ToJSON() string + + Raw() *anypb.Any +} + +// DecodeOptions wraps the options required by ResourceType implementation for +// decoding configuration received from the xDS management server. +type DecodeOptions struct { + // BootstrapConfig contains the bootstrap configuration passed to the + // top-level xdsClient. This contains useful data for resource validation. + BootstrapConfig *bootstrap.Config +} + +// DecodeResult is the result of a decode operation. +type DecodeResult struct { + // Name is the name of the resource being watched. + Name string + // Resource contains the configuration associated with the resource being + // watched. + Resource ResourceData +} + +// resourceTypeState wraps the static state associated with concrete resource +// type implementations, which can then embed this struct and get the methods +// implemented here for free. 
+type resourceTypeState struct { + typeURL string + typeName string + allResourcesRequiredInSotW bool +} + +func (r resourceTypeState) TypeURL() string { + return r.typeURL +} + +func (r resourceTypeState) TypeName() string { + return r.typeName +} + +func (r resourceTypeState) AllResourcesRequiredInSotW() bool { + return r.allResourcesRequiredInSotW +} diff --git a/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go new file mode 100644 index 000000000000..8ce5cb28596e --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -0,0 +1,150 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // RouteConfigTypeName represents the transport agnostic name for the + // route config resource. + RouteConfigTypeName = "RouteConfigResource" +) + +var ( + // Compile time interface checks. + _ Type = routeConfigResourceType{} + + // Singleton instantiation of the resource type implementation. 
+ routeConfigType = routeConfigResourceType{ + resourceTypeState: resourceTypeState{ + typeURL: version.V3RouteConfigURL, + typeName: "RouteConfigResource", + allResourcesRequiredInSotW: false, + }, + } +) + +// routeConfigResourceType provides the resource-type specific functionality for +// a RouteConfiguration resource. +// +// Implements the Type interface. +type routeConfigResourceType struct { + resourceTypeState +} + +// Decode deserializes and validates an xDS resource serialized inside the +// provided `Any` proto, as received from the xDS management server. +func (routeConfigResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { + name, rc, err := unmarshalRouteConfigResource(resource) + switch { + case name == "": + // Name is unset only when protobuf deserialization fails. + return nil, err + case err != nil: + // Protobuf deserialization succeeded, but resource validation failed. + return &DecodeResult{Name: name, Resource: &RouteConfigResourceData{Resource: RouteConfigUpdate{}}}, err + } + + return &DecodeResult{Name: name, Resource: &RouteConfigResourceData{Resource: rc}}, nil + +} + +// RouteConfigResourceData wraps the configuration of a RouteConfiguration +// resource as received from the management server. +// +// Implements the ResourceData interface. +type RouteConfigResourceData struct { + ResourceData + + // TODO: We have always stored update structs by value. See if this can be + // switched to a pointer? + Resource RouteConfigUpdate +} + +// Equal returns true if other is equal to r. +func (r *RouteConfigResourceData) Equal(other ResourceData) bool { + if r == nil && other == nil { + return true + } + if (r == nil) != (other == nil) { + return false + } + return proto.Equal(r.Resource.Raw, other.Raw()) + +} + +// ToJSON returns a JSON string representation of the resource data. 
+func (r *RouteConfigResourceData) ToJSON() string { + return pretty.ToJSON(r.Resource) +} + +// Raw returns the underlying raw protobuf form of the route configuration +// resource. +func (r *RouteConfigResourceData) Raw() *anypb.Any { + return r.Resource.Raw +} + +// RouteConfigWatcher wraps the callbacks to be invoked for different +// events corresponding to the route configuration resource being watched. +type RouteConfigWatcher interface { + // OnUpdate is invoked to report an update for the resource being watched. + OnUpdate(*RouteConfigResourceData) + + // OnError is invoked under different error conditions including but not + // limited to the following: + // - authority mentioned in the resource is not found + // - resource name parsing error + // - resource deserialization error + // - resource validation error + // - ADS stream failure + // - connection failure + OnError(error) + + // OnResourceDoesNotExist is invoked for a specific error condition where + // the requested resource is not found on the xDS management server. + OnResourceDoesNotExist() +} + +type delegatingRouteConfigWatcher struct { + watcher RouteConfigWatcher +} + +func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData) { + rc := data.(*RouteConfigResourceData) + d.watcher.OnUpdate(rc) +} + +func (d *delegatingRouteConfigWatcher) OnError(err error) { + d.watcher.OnError(err) +} + +func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist() { + d.watcher.OnResourceDoesNotExist() +} + +// WatchRouteConfig uses xDS to discover the configuration associated with the +// provided route configuration resource name. 
+func WatchRouteConfig(p Producer, name string, w RouteConfigWatcher) (cancel func()) { + delegator := &delegatingRouteConfigWatcher{watcher: w} + return p.WatchResource(routeConfigType, name, delegator) +} diff --git a/xds/internal/test/xds_integration_test.go b/xds/internal/xdsclient/xdsresource/test_utils_test.go similarity index 67% rename from xds/internal/test/xds_integration_test.go rename to xds/internal/xdsclient/xdsresource/test_utils_test.go index ae306ae7864e..04a15f96c248 100644 --- a/xds/internal/test/xds_integration_test.go +++ b/xds/internal/xdsclient/xdsresource/test_utils_test.go @@ -1,5 +1,3 @@ -// +build !386 - /* * * Copyright 2020 gRPC authors. @@ -15,23 +13,18 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * */ -// Package xds_test contains e2e tests for xDS use. -package xds_test +package xdsresource import ( - "context" "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "google.golang.org/grpc/internal/grpctest" - testpb "google.golang.org/grpc/test/grpc_testing" -) - -const ( - defaultTestTimeout = 10 * time.Second + "google.golang.org/protobuf/testing/protocmp" ) type s struct { @@ -42,10 +35,11 @@ func Test(t *testing.T) { grpctest.RunSubTests(t, s{}) } -type testService struct { - testpb.TestServiceServer -} - -func (*testService) EmptyCall(context.Context, *testpb.Empty) (*testpb.Empty, error) { - return &testpb.Empty{}, nil -} +var ( + cmpOpts = cmp.Options{ + cmpopts.EquateEmpty(), + cmp.FilterValues(func(x, y error) bool { return true }, cmpopts.EquateErrors()), + cmp.Comparer(func(a, b time.Time) bool { return true }), + protocmp.Transform(), + } +) diff --git a/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go new file mode 100644 index 
000000000000..e8665925739b --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/tests/unmarshal_cds_test.go @@ -0,0 +1,604 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tests_test contains test cases for unmarshalling of CDS resources. +package tests_test + +import ( + "encoding/json" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + _ "google.golang.org/grpc/balancer/roundrobin" // To register round_robin load balancer. + "google.golang.org/grpc/internal/balancer/stub" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpctest" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/serviceconfig" + _ "google.golang.org/grpc/xds" // Register the xDS LB Registry Converters. 
+ "google.golang.org/grpc/xds/internal/balancer/ringhash" + "google.golang.org/grpc/xds/internal/balancer/wrrlocality" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + "google.golang.org/protobuf/types/known/wrapperspb" + + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3roundrobinpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/round_robin/v3" + v3wrrlocalitypb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/wrr_locality/v3" + "github.com/golang/protobuf/proto" + anypb "github.com/golang/protobuf/ptypes/any" + structpb "github.com/golang/protobuf/ptypes/struct" +) + +type s struct { + grpctest.Tester +} + +func Test(t *testing.T) { + grpctest.RunSubTests(t, s{}) +} + +const ( + clusterName = "clusterName" + serviceName = "service" +) + +var emptyUpdate = xdsresource.ClusterUpdate{ClusterName: clusterName, LRSServerConfig: xdsresource.ClusterLRSOff} + +func wrrLocality(m proto.Message) *v3wrrlocalitypb.WrrLocality { + return &v3wrrlocalitypb.WrrLocality{ + EndpointPickingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(m), + }, + }, + }, + }, + } +} + +func wrrLocalityAny(m proto.Message) *anypb.Any { + return testutils.MarshalAny(wrrLocality(m)) 
+} + +type customLBConfig struct { + serviceconfig.LoadBalancingConfig +} + +// We have this test in a separate test package in order to not take a +// dependency on the internal xDS balancer packages within the xDS Client. +func (s) TestValidateCluster_Success(t *testing.T) { + const customLBPolicyName = "myorg.MyCustomLeastRequestPolicy" + stub.Register(customLBPolicyName, stub.BalancerFuncs{ + ParseConfig: func(json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + return customLBConfig{}, nil + }, + }) + + origCustomLBSupport := envconfig.XDSCustomLBPolicy + envconfig.XDSCustomLBPolicy = true + defer func() { + envconfig.XDSCustomLBPolicy = origCustomLBSupport + }() + tests := []struct { + name string + cluster *v3clusterpb.Cluster + wantUpdate xdsresource.ClusterUpdate + wantLBConfig *iserviceconfig.BalancerConfig + customLBDisabled bool + }{ + { + name: "happy-case-logical-dns", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LoadAssignment: &v3endpointpb.ClusterLoadAssignment{ + Endpoints: []*v3endpointpb.LocalityLbEndpoints{{ + LbEndpoints: []*v3endpointpb.LbEndpoint{{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: "dns_host", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: 8080, + }, + }, + }, + }, + }, + }, + }}, + }}, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, + ClusterType: xdsresource.ClusterTypeLogicalDNS, + DNSHostName: "dns_host:8080", + }, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happy-case-aggregate-v3", + cluster: 
&v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ + Clusters: []string{"a", "b", "c"}, + }), + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, LRSServerConfig: xdsresource.ClusterLRSOff, ClusterType: xdsresource.ClusterTypeAggregate, + PrioritizedClusterNames: []string{"a", "b", "c"}, + }, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happy-case-no-service-name-no-lrs", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: emptyUpdate, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happy-case-no-lrs", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: xdsresource.ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, 
LRSServerConfig: xdsresource.ClusterLRSOff}, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happiest-case", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf}, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happiest-case-with-circuitbreakers", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + CircuitBreakers: &v3clusterpb.CircuitBreakers{ + Thresholds: []*v3clusterpb.CircuitBreakers_Thresholds{ + { + Priority: v3corepb.RoutingPriority_DEFAULT, + MaxRequests: wrapperspb.UInt32(512), + }, + { + Priority: v3corepb.RoutingPriority_HIGH, + MaxRequests: nil, + }, + }, + }, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: 
&v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf, MaxRequests: func() *uint32 { i := uint32(512); return &i }()}, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happiest-case-with-ring-hash-lb-policy-with-default-config", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf, + }, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 1024, + MaxRingSize: 4096, + }, + }, + }, + { + name: "happiest-case-with-ring-hash-lb-policy-with-none-default-config", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + 
RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }, + }, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, LRSServerConfig: xdsresource.ClusterLRSServerSelf, + }, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 10, + MaxRingSize: 100, + }, + }, + }, + { + name: "happiest-case-with-ring-hash-lb-policy-configured-through-LoadBalancingPolicy", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, + }, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 10, + MaxRingSize: 100, + }, + }, + }, + { + name: "happiest-case-with-wrrlocality-rr-child-configured-through-LoadBalancingPolicy", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: 
&v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(&v3roundrobinpb.RoundRobin{}), + }, + }, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, + }, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "round_robin", + }, + }, + }, + }, + { + name: "happiest-case-with-custom-lb-configured-through-LoadBalancingPolicy", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: wrrLocalityAny(&v3xdsxdstypepb.TypedStruct{ + TypeUrl: "type.googleapis.com/myorg.MyCustomLeastRequestPolicy", + Value: &structpb.Struct{}, + }), + }, + }, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, + }, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: wrrlocality.Name, + Config: &wrrlocality.LBConfig{ + ChildPolicy: &iserviceconfig.BalancerConfig{ + Name: "myorg.MyCustomLeastRequestPolicy", + Config: customLBConfig{}, + }, + }, + }, + }, + { + name: 
"custom-lb-env-var-not-set-ignore-load-balancing-policy-use-lb-policy-and-enum", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + MinimumRingSize: wrapperspb.UInt64(20), + MaximumRingSize: wrapperspb.UInt64(200), + }, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, + }, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 20, + MaxRingSize: 200, + }, + }, + customLBDisabled: true, + }, + { + name: "load-balancing-policy-takes-precedence-over-lb-policy-and-enum", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + 
MinimumRingSize: wrapperspb.UInt64(20), + MaximumRingSize: wrapperspb.UInt64(200), + }, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(100), + }), + }, + }, + }, + }, + }, + wantUpdate: xdsresource.ClusterUpdate{ + ClusterName: clusterName, EDSServiceName: serviceName, + }, + wantLBConfig: &iserviceconfig.BalancerConfig{ + Name: "ring_hash_experimental", + Config: &ringhash.LBConfig{ + MinRingSize: 10, + MaxRingSize: 100, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.customLBDisabled { + envconfig.XDSCustomLBPolicy = false + defer func() { + envconfig.XDSCustomLBPolicy = true + }() + } + update, err := xdsresource.ValidateClusterAndConstructClusterUpdateForTesting(test.cluster) + if err != nil { + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) failed: %v", test.cluster, err) + } + // Ignore the raw JSON string into the cluster update. JSON bytes + // are nondeterministic (whitespace etc.) so we cannot reliably + // compare JSON bytes in a test. Thus, marshal into a Balancer + // Config struct and compare on that. Only need to test this JSON + // emission here, as this covers the possible output space. 
+ if diff := cmp.Diff(update, test.wantUpdate, cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "LBPolicy")); diff != "" { + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) got diff: %v (-got, +want)", test.cluster, diff) + } + bc := &iserviceconfig.BalancerConfig{} + if err := json.Unmarshal(update.LBPolicy, bc); err != nil { + t.Fatalf("failed to unmarshal JSON: %v", err) + } + if diff := cmp.Diff(bc, test.wantLBConfig); diff != "" { + t.Fatalf("update.LBConfig got unexpected output, diff (-got +want): %v", diff) + } + }) + } +} diff --git a/xds/internal/xdsclient/xdsresource/type.go b/xds/internal/xdsclient/xdsresource/type.go new file mode 100644 index 000000000000..0fb3f274ed46 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/type.go @@ -0,0 +1,135 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "time" + + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" +) + +// UpdateValidatorFunc performs validations on update structs using +// context/logic available at the xdsClient layer. Since these validation are +// performed on internal update structs, they can be shared between different +// API clients. 
+type UpdateValidatorFunc func(interface{}) error + +// UpdateMetadata contains the metadata for each update, including timestamp, +// raw message, and so on. +type UpdateMetadata struct { + // Status is the status of this resource, e.g. ACKed, NACKed, or + // Not_exist(removed). + Status ServiceStatus + // Version is the version of the xds response. Note that this is the version + // of the resource in use (previous ACKed). If a response is NACKed, the + // NACKed version is in ErrState. + Version string + // Timestamp is when the response is received. + Timestamp time.Time + // ErrState is set when the update is NACKed. + ErrState *UpdateErrorMetadata +} + +// IsListenerResource returns true if the provider URL corresponds to an xDS +// Listener resource. +func IsListenerResource(url string) bool { + return url == version.V3ListenerURL +} + +// IsHTTPConnManagerResource returns true if the provider URL corresponds to an xDS +// HTTPConnManager resource. +func IsHTTPConnManagerResource(url string) bool { + return url == version.V3HTTPConnManagerURL +} + +// IsRouteConfigResource returns true if the provider URL corresponds to an xDS +// RouteConfig resource. +func IsRouteConfigResource(url string) bool { + return url == version.V3RouteConfigURL +} + +// IsClusterResource returns true if the provider URL corresponds to an xDS +// Cluster resource. +func IsClusterResource(url string) bool { + return url == version.V3ClusterURL +} + +// IsEndpointsResource returns true if the provider URL corresponds to an xDS +// Endpoints resource. +func IsEndpointsResource(url string) bool { + return url == version.V3EndpointsURL +} + +// UnwrapResource unwraps and returns the inner resource if it's in a resource +// wrapper. The original resource is returned if it's not wrapped. +func UnwrapResource(r *anypb.Any) (*anypb.Any, error) { + url := r.GetTypeUrl() + if url != version.V3ResourceWrapperURL { + // Not wrapped. 
+ return r, nil + } + inner := &v3discoverypb.Resource{} + if err := proto.Unmarshal(r.GetValue(), inner); err != nil { + return nil, err + } + return inner.Resource, nil +} + +// ServiceStatus is the status of the update. +type ServiceStatus int + +const ( + // ServiceStatusUnknown is the default state, before a watch is started for + // the resource. + ServiceStatusUnknown ServiceStatus = iota + // ServiceStatusRequested is when the watch is started, but before and + // response is received. + ServiceStatusRequested + // ServiceStatusNotExist is when the resource doesn't exist in + // state-of-the-world responses (e.g. LDS and CDS), which means the resource + // is removed by the management server. + ServiceStatusNotExist // Resource is removed in the server, in LDS/CDS. + // ServiceStatusACKed is when the resource is ACKed. + ServiceStatusACKed + // ServiceStatusNACKed is when the resource is NACKed. + ServiceStatusNACKed +) + +// UpdateErrorMetadata is part of UpdateMetadata. It contains the error state +// when a response is NACKed. +type UpdateErrorMetadata struct { + // Version is the version of the NACKed response. + Version string + // Err contains why the response was NACKed. + Err error + // Timestamp is when the NACKed response was received. + Timestamp time.Time +} + +// UpdateWithMD contains the raw message of the update and the metadata, +// including version, raw message, timestamp. +// +// This is to be used for config dump and CSDS, not directly by users (like +// resolvers/balancers). +type UpdateWithMD struct { + MD UpdateMetadata + Raw *anypb.Any +} diff --git a/xds/internal/xdsclient/xdsresource/type_cds.go b/xds/internal/xdsclient/xdsresource/type_cds.go new file mode 100644 index 000000000000..269d9ebdae15 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/type_cds.go @@ -0,0 +1,96 @@ +/* + * + * Copyright 2021 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "encoding/json" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ClusterType is the type of cluster from a received CDS response. +type ClusterType int + +const ( + // ClusterTypeEDS represents the EDS cluster type, which will delegate endpoint + // discovery to the management server. + ClusterTypeEDS ClusterType = iota + // ClusterTypeLogicalDNS represents the Logical DNS cluster type, which essentially + // maps to the gRPC behavior of using the DNS resolver with pick_first LB policy. + ClusterTypeLogicalDNS + // ClusterTypeAggregate represents the Aggregate Cluster type, which provides a + // prioritized list of clusters to use. It is used for failover between clusters + // with a different configuration. + ClusterTypeAggregate +) + +// ClusterLRSServerConfigType is the type of LRS server config. +type ClusterLRSServerConfigType int + +const ( + // ClusterLRSOff indicates LRS is off (loads are not reported for this + // cluster). + ClusterLRSOff ClusterLRSServerConfigType = iota + // ClusterLRSServerSelf indicates loads should be reported to the same + // server (the authority) where the CDS resp is received from. + ClusterLRSServerSelf +) + +// ClusterUpdate contains information from a received CDS response, which is of +// interest to the registered CDS watcher. 
+type ClusterUpdate struct { + ClusterType ClusterType + // ClusterName is the clusterName being watched for through CDS. + ClusterName string + // EDSServiceName is an optional name for EDS. If it's not set, the balancer + // should watch ClusterName for the EDS resources. + EDSServiceName string + // LRSServerConfig contains the server where the load reports should be sent + // to. This can be change to an interface, to support other types, e.g. a + // ServerConfig with ServerURI, creds. + LRSServerConfig ClusterLRSServerConfigType + // SecurityCfg contains security configuration sent by the control plane. + SecurityCfg *SecurityConfig + // MaxRequests for circuit breaking, if any (otherwise nil). + MaxRequests *uint32 + // DNSHostName is used only for cluster type DNS. It's the DNS name to + // resolve in "host:port" form + DNSHostName string + // PrioritizedClusterNames is used only for cluster type aggregate. It represents + // a prioritized list of cluster names. + PrioritizedClusterNames []string + + // LBPolicy represents the locality and endpoint picking policy in JSON, + // which will be the child policy of xds_cluster_impl. + LBPolicy json.RawMessage + + // OutlierDetection is the outlier detection configuration for this cluster. + // If nil, it means this cluster does not use the outlier detection feature. + OutlierDetection json.RawMessage + + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// ClusterUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. 
+type ClusterUpdateErrTuple struct { + Update ClusterUpdate + Err error +} diff --git a/xds/internal/xdsclient/xdsresource/type_eds.go b/xds/internal/xdsclient/xdsresource/type_eds.go new file mode 100644 index 000000000000..1254d250c99b --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/type_eds.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "google.golang.org/grpc/xds/internal" + "google.golang.org/protobuf/types/known/anypb" +) + +// OverloadDropConfig contains the config to drop overloads. +type OverloadDropConfig struct { + Category string + Numerator uint32 + Denominator uint32 +} + +// EndpointHealthStatus represents the health status of an endpoint. +type EndpointHealthStatus int32 + +const ( + // EndpointHealthStatusUnknown represents HealthStatus UNKNOWN. + EndpointHealthStatusUnknown EndpointHealthStatus = iota + // EndpointHealthStatusHealthy represents HealthStatus HEALTHY. + EndpointHealthStatusHealthy + // EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY. + EndpointHealthStatusUnhealthy + // EndpointHealthStatusDraining represents HealthStatus DRAINING. + EndpointHealthStatusDraining + // EndpointHealthStatusTimeout represents HealthStatus TIMEOUT. + EndpointHealthStatusTimeout + // EndpointHealthStatusDegraded represents HealthStatus DEGRADED. 
+ EndpointHealthStatusDegraded +) + +// Endpoint contains information of an endpoint. +type Endpoint struct { + Address string + HealthStatus EndpointHealthStatus + Weight uint32 +} + +// Locality contains information of a locality. +type Locality struct { + Endpoints []Endpoint + ID internal.LocalityID + Priority uint32 + Weight uint32 +} + +// EndpointsUpdate contains an EDS update. +type EndpointsUpdate struct { + Drops []OverloadDropConfig + // Localities in the EDS response with `load_balancing_weight` field not set + // or explicitly set to 0 are ignored while parsing the resource, and + // therefore do not show up here. + Localities []Locality + + // Raw is the resource from the xds response. + Raw *anypb.Any +} diff --git a/xds/internal/xdsclient/xdsresource/type_lds.go b/xds/internal/xdsclient/xdsresource/type_lds.go new file mode 100644 index 000000000000..a2742fb4371a --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/type_lds.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "time" + + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" +) + +// ListenerUpdate contains information received in an LDS response, which is of +// interest to the registered LDS watcher. 
+type ListenerUpdate struct { + // RouteConfigName is the route configuration name corresponding to the + // target which is being watched through LDS. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + RouteConfigName string + // InlineRouteConfig is the inline route configuration (RDS response) + // returned inside LDS. + // + // Exactly one of RouteConfigName and InlineRouteConfig is set. + InlineRouteConfig *RouteConfigUpdate + + // MaxStreamDuration contains the HTTP connection manager's + // common_http_protocol_options.max_stream_duration field, or zero if + // unset. + MaxStreamDuration time.Duration + // HTTPFilters is a list of HTTP filters (name, config) from the LDS + // response. + HTTPFilters []HTTPFilter + // InboundListenerCfg contains inbound listener configuration. + InboundListenerCfg *InboundListenerConfig + + // Raw is the resource from the xds response. + Raw *anypb.Any +} + +// HTTPFilter represents one HTTP filter from an LDS response's HTTP connection +// manager field. +type HTTPFilter struct { + // Name is an arbitrary name of the filter. Used for applying override + // settings in virtual host / route / weighted cluster configuration (not + // yet supported). + Name string + // Filter is the HTTP filter found in the registry for the config type. + Filter httpfilter.Filter + // Config contains the filter's configuration + Config httpfilter.FilterConfig +} + +// InboundListenerConfig contains information about the inbound listener, i.e +// the server-side listener. +type InboundListenerConfig struct { + // Address is the local address on which the inbound listener is expected to + // accept incoming connections. + Address string + // Port is the local port on which the inbound listener is expected to + // accept incoming connections. + Port string + // FilterChains is the list of filter chains associated with this listener. 
+ FilterChains *FilterChainManager +} + +// ListenerUpdateErrTuple is a tuple with the update and error. It contains the +// results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type ListenerUpdateErrTuple struct { + Update ListenerUpdate + Err error +} diff --git a/xds/internal/xdsclient/xdsresource/type_rds.go b/xds/internal/xdsclient/xdsresource/type_rds.go new file mode 100644 index 000000000000..ad59209163e7 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/type_rds.go @@ -0,0 +1,256 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "regexp" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" +) + +// RouteConfigUpdate contains information received in an RDS response, which is +// of interest to the registered RDS watcher. +type RouteConfigUpdate struct { + VirtualHosts []*VirtualHost + // ClusterSpecifierPlugins are the LB Configurations for any + // ClusterSpecifierPlugins referenced by the Route Table. + ClusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig + // Raw is the resource from the xds response. 
+ Raw *anypb.Any +} + +// VirtualHost contains the routes for a list of Domains. +// +// Note that the domains in this slice can be a wildcard, not an exact string. +// The consumer of this struct needs to find the best match for its hostname. +type VirtualHost struct { + Domains []string + // Routes contains a list of routes, each containing matchers and + // corresponding action. + Routes []*Route + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the virtual host which may be present. An individual filter's override + // may be unused if the matching Route contains an override for that + // filter. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig +} + +// RetryConfig contains all retry-related configuration in either a VirtualHost +// or Route. +type RetryConfig struct { + // RetryOn is a set of status codes on which to retry. Only Canceled, + // DeadlineExceeded, Internal, ResourceExhausted, and Unavailable are + // supported; any other values will be omitted. + RetryOn map[codes.Code]bool + NumRetries uint32 // maximum number of retry attempts + RetryBackoff RetryBackoff // retry backoff policy +} + +// RetryBackoff describes the backoff policy for retries. +type RetryBackoff struct { + BaseInterval time.Duration // initial backoff duration between attempts + MaxInterval time.Duration // maximum backoff duration +} + +// HashPolicyType specifies the type of HashPolicy from a received RDS Response. +type HashPolicyType int + +const ( + // HashPolicyTypeHeader specifies to hash a Header in the incoming request. + HashPolicyTypeHeader HashPolicyType = iota + // HashPolicyTypeChannelID specifies to hash a unique Identifier of the + // Channel. This is a 64-bit random int computed at initialization time. + HashPolicyTypeChannelID +) + +// HashPolicy specifies the HashPolicy if the upstream cluster uses a hashing +// load balancer. 
+type HashPolicy struct { + HashPolicyType HashPolicyType + Terminal bool + // Fields used for type HEADER. + HeaderName string + Regex *regexp.Regexp + RegexSubstitution string +} + +// RouteActionType is the action of the route from a received RDS response. +type RouteActionType int + +const ( + // RouteActionUnsupported are routing types currently unsupported by grpc. + // According to A36, "A Route with an inappropriate action causes RPCs + // matching that route to fail." + RouteActionUnsupported RouteActionType = iota + // RouteActionRoute is the expected route type on the client side. Route + // represents routing a request to some upstream cluster. On the client + // side, if an RPC matches to a route that is not RouteActionRoute, the RPC + // will fail according to A36. + RouteActionRoute + // RouteActionNonForwardingAction is the expected route type on the server + // side. NonForwardingAction represents when a route will generate a + // response directly, without forwarding to an upstream host. + RouteActionNonForwardingAction +) + +// Route is both a specification of how to match a request as well as an +// indication of the action to take upon match. +type Route struct { + Path *string + Prefix *string + Regex *regexp.Regexp + // Indicates if prefix/path matching should be case insensitive. The default + // is false (case sensitive). + CaseInsensitive bool + Headers []*HeaderMatcher + Fraction *uint32 + + HashPolicies []*HashPolicy + + // If the matchers above indicate a match, the below configuration is used. + // If MaxStreamDuration is nil, it indicates neither of the route action's + // max_stream_duration fields (grpc_timeout_header_max nor + // max_stream_duration) were set. In this case, the ListenerUpdate's + // MaxStreamDuration field should be used. If MaxStreamDuration is set to + // an explicit zero duration, the application's deadline should be used. 
+ MaxStreamDuration *time.Duration + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the route which may be present. An individual filter's override may be + // unused if the matching WeightedCluster contains an override for that + // filter. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig + RetryConfig *RetryConfig + + ActionType RouteActionType + + // Only one of the following fields (WeightedClusters or + // ClusterSpecifierPlugin) will be set for a route. + WeightedClusters map[string]WeightedCluster + // ClusterSpecifierPlugin is the name of the Cluster Specifier Plugin that + // this Route is linked to, if specified by xDS. + ClusterSpecifierPlugin string +} + +// WeightedCluster contains settings for an xds ActionType.WeightedCluster. +type WeightedCluster struct { + // Weight is the relative weight of the cluster. It will never be zero. + Weight uint32 + // HTTPFilterConfigOverride contains any HTTP filter config overrides for + // the weighted cluster which may be present. + HTTPFilterConfigOverride map[string]httpfilter.FilterConfig +} + +// HeaderMatcher represents header matchers. +type HeaderMatcher struct { + Name string + InvertMatch *bool + ExactMatch *string + RegexMatch *regexp.Regexp + PrefixMatch *string + SuffixMatch *string + RangeMatch *Int64Range + PresentMatch *bool + StringMatch *matcher.StringMatcher +} + +// Int64Range is a range for header range match. +type Int64Range struct { + Start int64 + End int64 +} + +// SecurityConfig contains the security configuration received as part of the +// Cluster resource on the client-side, and as part of the Listener resource on +// the server-side. +type SecurityConfig struct { + // RootInstanceName identifies the certProvider plugin to be used to fetch + // root certificates. This instance name will be resolved to the plugin name + // and its associated configuration from the certificate_providers field of + // the bootstrap file. 
+ RootInstanceName string + // RootCertName is the certificate name to be passed to the plugin (looked + // up from the bootstrap file) while fetching root certificates. + RootCertName string + // IdentityInstanceName identifies the certProvider plugin to be used to + // fetch identity certificates. This instance name will be resolved to the + // plugin name and its associated configuration from the + // certificate_providers field of the bootstrap file. + IdentityInstanceName string + // IdentityCertName is the certificate name to be passed to the plugin + // (looked up from the bootstrap file) while fetching identity certificates. + IdentityCertName string + // SubjectAltNameMatchers is an optional list of match criteria for SANs + // specified on the peer certificate. Used only on the client-side. + // + // Some intricacies: + // - If this field is empty, then any peer certificate is accepted. + // - If the peer certificate contains a wildcard DNS SAN, and an `exact` + // matcher is configured, a wildcard DNS match is performed instead of a + // regular string comparison. + SubjectAltNameMatchers []matcher.StringMatcher + // RequireClientCert indicates if the server handshake process expects the + // client to present a certificate. Set to true when performing mTLS. Used + // only on the server-side. + RequireClientCert bool +} + +// Equal returns true if sc is equal to other. 
+func (sc *SecurityConfig) Equal(other *SecurityConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + } + switch { + case sc.RootInstanceName != other.RootInstanceName: + return false + case sc.RootCertName != other.RootCertName: + return false + case sc.IdentityInstanceName != other.IdentityInstanceName: + return false + case sc.IdentityCertName != other.IdentityCertName: + return false + case sc.RequireClientCert != other.RequireClientCert: + return false + default: + if len(sc.SubjectAltNameMatchers) != len(other.SubjectAltNameMatchers) { + return false + } + for i := 0; i < len(sc.SubjectAltNameMatchers); i++ { + if !sc.SubjectAltNameMatchers[i].Equal(other.SubjectAltNameMatchers[i]) { + return false + } + } + } + return true +} + +// RouteConfigUpdateErrTuple is a tuple with the update and error. It contains +// the results from unmarshal functions. It's used to pass unmarshal results of +// multiple resources together, e.g. in maps like `map[string]{Update,error}`. +type RouteConfigUpdateErrTuple struct { + Update RouteConfigUpdate + Err error +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go new file mode 100644 index 000000000000..cb1d0b2dfdcb --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -0,0 +1,672 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "strconv" + "strings" + "time" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + "github.com/golang/protobuf/proto" + + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" + iserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/anypb" +) + +// ValidateClusterAndConstructClusterUpdateForTesting exports the +// validateClusterAndConstructClusterUpdate function for testing purposes. +var ValidateClusterAndConstructClusterUpdateForTesting = validateClusterAndConstructClusterUpdate + +// TransportSocket proto message has a `name` field which is expected to be set +// to this value by the management server. 
+const transportSocketName = "envoy.transport_sockets.tls" + +func unmarshalClusterResource(r *anypb.Any) (string, ClusterUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", ClusterUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsClusterResource(r.GetTypeUrl()) { + return "", ClusterUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + + cluster := &v3clusterpb.Cluster{} + if err := proto.Unmarshal(r.GetValue(), cluster); err != nil { + return "", ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + cu, err := validateClusterAndConstructClusterUpdate(cluster) + if err != nil { + return cluster.GetName(), ClusterUpdate{}, err + } + cu.Raw = r + + return cluster.GetName(), cu, nil +} + +const ( + defaultRingHashMinSize = 1024 + defaultRingHashMaxSize = 8 * 1024 * 1024 // 8M + ringHashSizeUpperBound = 8 * 1024 * 1024 // 8M +) + +func validateClusterAndConstructClusterUpdate(cluster *v3clusterpb.Cluster) (ClusterUpdate, error) { + var lbPolicy json.RawMessage + var err error + switch cluster.GetLbPolicy() { + case v3clusterpb.Cluster_ROUND_ROBIN: + lbPolicy = []byte(`[{"xds_wrr_locality_experimental": {"childPolicy": [{"round_robin": {}}]}}]`) + case v3clusterpb.Cluster_RING_HASH: + if !envconfig.XDSRingHash { + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) + } + rhc := cluster.GetRingHashLbConfig() + if rhc.GetHashFunction() != v3clusterpb.Cluster_RingHashLbConfig_XX_HASH { + return ClusterUpdate{}, fmt.Errorf("unsupported ring_hash hash function %v in response: %+v", rhc.GetHashFunction(), cluster) + } + // Minimum defaults to 1024 entries, and limited to 8M entries Maximum + // defaults to 8M entries, and limited to 8M entries + var minSize, maxSize uint64 = defaultRingHashMinSize, defaultRingHashMaxSize + if min := rhc.GetMinimumRingSize(); min != nil { + minSize = min.GetValue() + } + if max := 
rhc.GetMaximumRingSize(); max != nil { + maxSize = max.GetValue() + } + + rhLBCfg := []byte(fmt.Sprintf("{\"minRingSize\": %d, \"maxRingSize\": %d}", minSize, maxSize)) + lbPolicy = []byte(fmt.Sprintf(`[{"ring_hash_experimental": %s}]`, rhLBCfg)) + default: + return ClusterUpdate{}, fmt.Errorf("unexpected lbPolicy %v in response: %+v", cluster.GetLbPolicy(), cluster) + } + // Process security configuration received from the control plane iff the + // corresponding environment variable is set. + var sc *SecurityConfig + if envconfig.XDSClientSideSecurity { + var err error + if sc, err = securityConfigFromCluster(cluster); err != nil { + return ClusterUpdate{}, err + } + } + + // Process outlier detection received from the control plane iff the + // corresponding environment variable is set. + var od json.RawMessage + if envconfig.XDSOutlierDetection { + var err error + if od, err = outlierConfigFromCluster(cluster); err != nil { + return ClusterUpdate{}, err + } + } + + if cluster.GetLoadBalancingPolicy() != nil && envconfig.XDSCustomLBPolicy { + lbPolicy, err = xdslbregistry.ConvertToServiceConfig(cluster.GetLoadBalancingPolicy(), 0) + if err != nil { + return ClusterUpdate{}, fmt.Errorf("error converting LoadBalancingPolicy %v in response: %+v: %v", cluster.GetLoadBalancingPolicy(), cluster, err) + } + // "It will be the responsibility of the XdsClient to validate the + // converted configuration. It will do this by having the gRPC LB policy + // registry parse the configuration." 
- A52 + bc := &iserviceconfig.BalancerConfig{} + if err := json.Unmarshal(lbPolicy, bc); err != nil { + return ClusterUpdate{}, fmt.Errorf("JSON generated from xDS LB policy registry: %s is invalid: %v", pretty.FormatJSON(lbPolicy), err) + } + } + + ret := ClusterUpdate{ + ClusterName: cluster.GetName(), + SecurityCfg: sc, + MaxRequests: circuitBreakersFromCluster(cluster), + LBPolicy: lbPolicy, + OutlierDetection: od, + } + + // Note that this is different from the gRFC (gRFC A47 says to include the + // full ServerConfig{URL,creds,server feature} here). This information is + // not available here, because this function doesn't have access to the + // xdsclient bootstrap information now (can be added if necessary). The + // ServerConfig will be read and populated by the CDS balancer when + // processing this field. + // According to A27: + // If the `lrs_server` field is set, it must have its `self` field set, in + // which case the client should use LRS for load reporting. Otherwise + // (the `lrs_server` field is not set), LRS load reporting will be disabled. + if lrs := cluster.GetLrsServer(); lrs != nil { + if lrs.GetSelf() == nil { + return ClusterUpdate{}, fmt.Errorf("unsupported config_source_specifier %T in lrs_server field", lrs.ConfigSourceSpecifier) + } + ret.LRSServerConfig = ClusterLRSServerSelf + } + + // Validate and set cluster type from the response. 
+ switch { + case cluster.GetType() == v3clusterpb.Cluster_EDS: + if configsource := cluster.GetEdsClusterConfig().GetEdsConfig(); configsource.GetAds() == nil && configsource.GetSelf() == nil { + return ClusterUpdate{}, fmt.Errorf("CDS's EDS config source is not ADS or Self: %+v", cluster) + } + ret.ClusterType = ClusterTypeEDS + ret.EDSServiceName = cluster.GetEdsClusterConfig().GetServiceName() + if strings.HasPrefix(ret.ClusterName, "xdstp:") && ret.EDSServiceName == "" { + return ClusterUpdate{}, fmt.Errorf("CDS's EDS service name is not set with a new-style cluster name: %+v", cluster) + } + return ret, nil + case cluster.GetType() == v3clusterpb.Cluster_LOGICAL_DNS: + if !envconfig.XDSAggregateAndDNS { + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } + ret.ClusterType = ClusterTypeLogicalDNS + dnsHN, err := dnsHostNameFromCluster(cluster) + if err != nil { + return ClusterUpdate{}, err + } + ret.DNSHostName = dnsHN + return ret, nil + case cluster.GetClusterType() != nil && cluster.GetClusterType().Name == "envoy.clusters.aggregate": + if !envconfig.XDSAggregateAndDNS { + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), cluster) + } + clusters := &v3aggregateclusterpb.ClusterConfig{} + if err := proto.Unmarshal(cluster.GetClusterType().GetTypedConfig().GetValue(), clusters); err != nil { + return ClusterUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + if len(clusters.Clusters) == 0 { + return ClusterUpdate{}, fmt.Errorf("xds: aggregate cluster has empty clusters field in response: %+v", cluster) + } + ret.ClusterType = ClusterTypeAggregate + ret.PrioritizedClusterNames = clusters.Clusters + return ret, nil + default: + return ClusterUpdate{}, fmt.Errorf("unsupported cluster type (%v, %v) in response: %+v", cluster.GetType(), cluster.GetClusterType(), 
cluster) + } +} + +// dnsHostNameFromCluster extracts the DNS host name from the cluster's load +// assignment. +// +// There should be exactly one locality, with one endpoint, whose address +// contains the address and port. +func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { + loadAssignment := cluster.GetLoadAssignment() + if loadAssignment == nil { + return "", fmt.Errorf("load_assignment not present for LOGICAL_DNS cluster") + } + if len(loadAssignment.GetEndpoints()) != 1 { + return "", fmt.Errorf("load_assignment for LOGICAL_DNS cluster must have exactly one locality, got: %+v", loadAssignment) + } + endpoints := loadAssignment.GetEndpoints()[0].GetLbEndpoints() + if len(endpoints) != 1 { + return "", fmt.Errorf("locality for LOGICAL_DNS cluster must have exactly one endpoint, got: %+v", endpoints) + } + endpoint := endpoints[0].GetEndpoint() + if endpoint == nil { + return "", fmt.Errorf("endpoint for LOGICAL_DNS cluster not set") + } + socketAddr := endpoint.GetAddress().GetSocketAddress() + if socketAddr == nil { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set") + } + if socketAddr.GetResolverName() != "" { + return "", fmt.Errorf("socket address for endpoint for LOGICAL_DNS cluster not set has unexpected custom resolver name: %v", socketAddr.GetResolverName()) + } + host := socketAddr.GetAddress() + if host == "" { + return "", fmt.Errorf("host for endpoint for LOGICAL_DNS cluster not set") + } + port := socketAddr.GetPortValue() + if port == 0 { + return "", fmt.Errorf("port for endpoint for LOGICAL_DNS cluster not set") + } + return net.JoinHostPort(host, strconv.Itoa(int(port))), nil +} + +// securityConfigFromCluster extracts the relevant security configuration from +// the received Cluster resource. 
+func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { + if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 { + return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm) + } + // The Cluster resource contains a `transport_socket` field, which contains + // a oneof `typed_config` field of type `protobuf.Any`. The any proto + // contains a marshaled representation of an `UpstreamTlsContext` message. + ts := cluster.GetTransportSocket() + if ts == nil { + return nil, nil + } + if name := ts.GetName(); name != transportSocketName { + return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) + } + any := ts.GetTypedConfig() + if any == nil || any.TypeUrl != version.V3UpstreamTLSContextURL { + return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl) + } + upstreamCtx := &v3tlspb.UpstreamTlsContext{} + if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil { + return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err) + } + // The following fields from `UpstreamTlsContext` are ignored: + // - sni + // - allow_renegotiation + // - max_session_keys + if upstreamCtx.GetCommonTlsContext() == nil { + return nil, errors.New("UpstreamTlsContext in CDS response does not contain a CommonTlsContext") + } + + return securityConfigFromCommonTLSContext(upstreamCtx.GetCommonTlsContext(), false) +} + +// common is expected to be not nil. +// The `alpn_protocols` field is ignored. 
+func securityConfigFromCommonTLSContext(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + if common.GetTlsParams() != nil { + return nil, fmt.Errorf("unsupported tls_params field in CommonTlsContext message: %+v", common) + } + if common.GetCustomHandshaker() != nil { + return nil, fmt.Errorf("unsupported custom_handshaker field in CommonTlsContext message: %+v", common) + } + + // For now, if we can't get a valid security config from the new fields, we + // fallback to the old deprecated fields. + // TODO: Drop support for deprecated fields. NACK if err != nil here. + sc, _ := securityConfigFromCommonTLSContextUsingNewFields(common, server) + if sc == nil || sc.Equal(&SecurityConfig{}) { + var err error + sc, err = securityConfigFromCommonTLSContextWithDeprecatedFields(common, server) + if err != nil { + return nil, err + } + } + if sc != nil { + // sc == nil is a valid case where the control plane has not sent us any + // security configuration. xDS creds will use fallback creds. + if server { + if sc.IdentityInstanceName == "" { + return nil, errors.New("security configuration on the server-side does not contain identity certificate provider instance name") + } + } else { + if sc.RootInstanceName == "" { + return nil, errors.New("security configuration on the client-side does not contain root certificate provider instance name") + } + } + } + return sc, nil +} + +func securityConfigFromCommonTLSContextWithDeprecatedFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + // The `CommonTlsContext` contains a + // `tls_certificate_certificate_provider_instance` field of type + // `CertificateProviderInstance`, which contains the provider instance name + // and the certificate name to fetch identity certs. 
+ sc := &SecurityConfig{} + if identity := common.GetTlsCertificateCertificateProviderInstance(); identity != nil { + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + } + + // The `CommonTlsContext` contains a `validation_context_type` field which + // is a oneof. We can get the values that we are interested in from two of + // those possible values: + // - combined validation context: + // - contains a default validation context which holds the list of + // matchers for accepted SANs. + // - contains certificate provider instance configuration + // - certificate provider instance configuration + // - in this case, we do not get a list of accepted SANs. + switch t := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + combined := common.GetCombinedValidationContext() + var matchers []matcher.StringMatcher + if def := combined.GetDefaultValidationContext(); def != nil { + for _, m := range def.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } + sc.SubjectAltNameMatchers = matchers + if pi := combined.GetValidationContextCertificateProviderInstance(); pi != nil { + sc.RootInstanceName = pi.GetInstanceName() + sc.RootCertName = pi.GetCertificateName() + } + case *v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance: + pi := common.GetValidationContextCertificateProviderInstance() + sc.RootInstanceName = pi.GetInstanceName() + sc.RootCertName = pi.GetCertificateName() + case nil: + // It is valid for the validation context to be nil on the server side. 
+ default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", t) + } + return sc, nil +} + +// gRFC A29 https://github.com/grpc/proposal/blob/master/A29-xds-tls-security.md +// specifies the new way to fetch security configuration and says the following: +// +// Although there are various ways to obtain certificates as per this proto +// (which are supported by Envoy), gRPC supports only one of them and that is +// the `CertificateProviderPluginInstance` proto. +// +// This helper function attempts to fetch security configuration from the +// `CertificateProviderPluginInstance` message, given a CommonTlsContext. +func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsContext, server bool) (*SecurityConfig, error) { + // The `tls_certificate_provider_instance` field of type + // `CertificateProviderPluginInstance` is used to fetch the identity + // certificate provider. + sc := &SecurityConfig{} + identity := common.GetTlsCertificateProviderInstance() + if identity == nil && len(common.GetTlsCertificates()) != 0 { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificates is set in CommonTlsContext message: %+v", common) + } + if identity == nil && common.GetTlsCertificateSdsSecretConfigs() != nil { + return nil, fmt.Errorf("expected field tls_certificate_provider_instance is not set, while unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message: %+v", common) + } + sc.IdentityInstanceName = identity.GetInstanceName() + sc.IdentityCertName = identity.GetCertificateName() + + // The `CommonTlsContext` contains a oneof field `validation_context_type`, + // which contains the `CertificateValidationContext` message in one of the + // following ways: + // - `validation_context` field + // - this is directly of type `CertificateValidationContext` + // - `combined_validation_context` field + // - this is of type 
`CombinedCertificateValidationContext` and contains + // a `default validation context` field of type + // `CertificateValidationContext` + // + // The `CertificateValidationContext` message has the following fields that + // we are interested in: + // - `ca_certificate_provider_instance` + // - this is of type `CertificateProviderPluginInstance` + // - `match_subject_alt_names` + // - this is a list of string matchers + // + // The `CertificateProviderPluginInstance` message contains two fields + // - instance_name + // - this is the certificate provider instance name to be looked up in + // the bootstrap configuration + // - certificate_name + // - this is an opaque name passed to the certificate provider + var validationCtx *v3tlspb.CertificateValidationContext + switch typ := common.GetValidationContextType().(type) { + case *v3tlspb.CommonTlsContext_ValidationContext: + validationCtx = common.GetValidationContext() + case *v3tlspb.CommonTlsContext_CombinedValidationContext: + validationCtx = common.GetCombinedValidationContext().GetDefaultValidationContext() + case nil: + // It is valid for the validation context to be nil on the server side. + return sc, nil + default: + return nil, fmt.Errorf("validation context contains unexpected type: %T", typ) + } + // If we get here, it means that the `CertificateValidationContext` message + // was found through one of the supported ways. It is an error if the + // validation context is specified, but it does not contain the + // ca_certificate_provider_instance field which contains information about + // the certificate provider to be used for the root certificates. 
+ if validationCtx.GetCaCertificateProviderInstance() == nil { + return nil, fmt.Errorf("expected field ca_certificate_provider_instance is missing in CommonTlsContext message: %+v", common) + } + // The following fields are ignored: + // - trusted_ca + // - watched_directory + // - allow_expired_certificate + // - trust_chain_verification + switch { + case len(validationCtx.GetVerifyCertificateSpki()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_spki field in CommonTlsContext message: %+v", common) + case len(validationCtx.GetVerifyCertificateHash()) != 0: + return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common) + case validationCtx.GetRequireSignedCertificateTimestamp().GetValue(): + return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common) + case validationCtx.GetCrl() != nil: + return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common) + case validationCtx.GetCustomValidatorConfig() != nil: + return nil, fmt.Errorf("unsupported custom_validator_config field in CommonTlsContext message: %+v", common) + } + + if rootProvider := validationCtx.GetCaCertificateProviderInstance(); rootProvider != nil { + sc.RootInstanceName = rootProvider.GetInstanceName() + sc.RootCertName = rootProvider.GetCertificateName() + } + var matchers []matcher.StringMatcher + for _, m := range validationCtx.GetMatchSubjectAltNames() { + matcher, err := matcher.StringMatcherFromProto(m) + if err != nil { + return nil, err + } + matchers = append(matchers, matcher) + } + if server && len(matchers) != 0 { + return nil, fmt.Errorf("match_subject_alt_names field in validation context is not supported on the server: %v", common) + } + sc.SubjectAltNameMatchers = matchers + return sc, nil +} + +// circuitBreakersFromCluster extracts the circuit breakers configuration from +// the received cluster resource. 
Returns nil if no CircuitBreakers or no +// Thresholds in CircuitBreakers. +func circuitBreakersFromCluster(cluster *v3clusterpb.Cluster) *uint32 { + for _, threshold := range cluster.GetCircuitBreakers().GetThresholds() { + if threshold.GetPriority() != v3corepb.RoutingPriority_DEFAULT { + continue + } + maxRequestsPb := threshold.GetMaxRequests() + if maxRequestsPb == nil { + return nil + } + maxRequests := maxRequestsPb.GetValue() + return &maxRequests + } + return nil +} + +// idurationp takes a time.Duration and converts it to an internal duration, and +// returns a pointer to that internal duration. +func idurationp(d time.Duration) *iserviceconfig.Duration { + id := iserviceconfig.Duration(d) + return &id +} + +func uint32p(i uint32) *uint32 { + return &i +} + +// Helper types to prepare Outlier Detection JSON. Pointer types to distinguish +// between unset and a zero value. +type successRateEjection struct { + StdevFactor *uint32 `json:"stdevFactor,omitempty"` + EnforcementPercentage *uint32 `json:"enforcementPercentage,omitempty"` + MinimumHosts *uint32 `json:"minimumHosts,omitempty"` + RequestVolume *uint32 `json:"requestVolume,omitempty"` +} + +type failurePercentageEjection struct { + Threshold *uint32 `json:"threshold,omitempty"` + EnforcementPercentage *uint32 `json:"enforcementPercentage,omitempty"` + MinimumHosts *uint32 `json:"minimumHosts,omitempty"` + RequestVolume *uint32 `json:"requestVolume,omitempty"` +} + +type odLBConfig struct { + Interval *iserviceconfig.Duration `json:"interval,omitempty"` + BaseEjectionTime *iserviceconfig.Duration `json:"baseEjectionTime,omitempty"` + MaxEjectionTime *iserviceconfig.Duration `json:"maxEjectionTime,omitempty"` + MaxEjectionPercent *uint32 `json:"maxEjectionPercent,omitempty"` + SuccessRateEjection *successRateEjection `json:"successRateEjection,omitempty"` + FailurePercentageEjection *failurePercentageEjection `json:"failurePercentageEjection,omitempty"` +} + +// outlierConfigFromCluster converts the 
received Outlier Detection +// configuration into JSON configuration for Outlier Detection, taking into +// account xDS Defaults. Returns nil if no OutlierDetection field set in the +// cluster resource. +func outlierConfigFromCluster(cluster *v3clusterpb.Cluster) (json.RawMessage, error) { + od := cluster.GetOutlierDetection() + if od == nil { + return nil, nil + } + + // "The outlier_detection field of the Cluster resource should have its fields + // validated according to the rules for the corresponding LB policy config + // fields in the above "Validation" section. If any of these requirements is + // violated, the Cluster resource should be NACKed." - A50 + // "The google.protobuf.Duration fields interval, base_ejection_time, and + // max_ejection_time must obey the restrictions in the + // google.protobuf.Duration documentation and they must have non-negative + // values." - A50 + var interval *iserviceconfig.Duration + if i := od.GetInterval(); i != nil { + if err := i.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.interval is invalid with error: %v", err) + } + if interval = idurationp(i.AsDuration()); *interval < 0 { + return nil, fmt.Errorf("outlier_detection.interval = %v; must be a valid duration and >= 0", *interval) + } + } + + var baseEjectionTime *iserviceconfig.Duration + if bet := od.GetBaseEjectionTime(); bet != nil { + if err := bet.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.base_ejection_time is invalid with error: %v", err) + } + if baseEjectionTime = idurationp(bet.AsDuration()); *baseEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.base_ejection_time = %v; must be >= 0", *baseEjectionTime) + } + } + + var maxEjectionTime *iserviceconfig.Duration + if met := od.GetMaxEjectionTime(); met != nil { + if err := met.CheckValid(); err != nil { + return nil, fmt.Errorf("outlier_detection.max_ejection_time is invalid: %v", err) + } + if maxEjectionTime = idurationp(met.AsDuration()); 
*maxEjectionTime < 0 { + return nil, fmt.Errorf("outlier_detection.max_ejection_time = %v; must be >= 0", *maxEjectionTime) + } + } + + // "The fields max_ejection_percent, enforcing_success_rate, + // failure_percentage_threshold, and enforcing_failure_percentage must have + // values less than or equal to 100. If any of these requirements is + // violated, the Cluster resource should be NACKed." - A50 + var maxEjectionPercent *uint32 + if mep := od.GetMaxEjectionPercent(); mep != nil { + if maxEjectionPercent = uint32p(mep.GetValue()); *maxEjectionPercent > 100 { + return nil, fmt.Errorf("outlier_detection.max_ejection_percent = %v; must be <= 100", *maxEjectionPercent) + } + } + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var enforcingSuccessRate *uint32 + if esr := od.GetEnforcingSuccessRate(); esr != nil { + if enforcingSuccessRate = uint32p(esr.GetValue()); *enforcingSuccessRate > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_success_rate = %v; must be <= 100", *enforcingSuccessRate) + } + } + var failurePercentageThreshold *uint32 + if fpt := od.GetFailurePercentageThreshold(); fpt != nil { + if failurePercentageThreshold = uint32p(fpt.GetValue()); *failurePercentageThreshold > 100 { + return nil, fmt.Errorf("outlier_detection.failure_percentage_threshold = %v; must be <= 100", *failurePercentageThreshold) + } + } + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." 
- A50 + var enforcingFailurePercentage *uint32 + if efp := od.GetEnforcingFailurePercentage(); efp != nil { + if enforcingFailurePercentage = uint32p(efp.GetValue()); *enforcingFailurePercentage > 100 { + return nil, fmt.Errorf("outlier_detection.enforcing_failure_percentage = %v; must be <= 100", *enforcingFailurePercentage) + } + } + + var successRateStdevFactor *uint32 + if srsf := od.GetSuccessRateStdevFactor(); srsf != nil { + successRateStdevFactor = uint32p(srsf.GetValue()) + } + var successRateMinimumHosts *uint32 + if srmh := od.GetSuccessRateMinimumHosts(); srmh != nil { + successRateMinimumHosts = uint32p(srmh.GetValue()) + } + var successRateRequestVolume *uint32 + if srrv := od.GetSuccessRateRequestVolume(); srrv != nil { + successRateRequestVolume = uint32p(srrv.GetValue()) + } + var failurePercentageMinimumHosts *uint32 + if fpmh := od.GetFailurePercentageMinimumHosts(); fpmh != nil { + failurePercentageMinimumHosts = uint32p(fpmh.GetValue()) + } + var failurePercentageRequestVolume *uint32 + if fprv := od.GetFailurePercentageRequestVolume(); fprv != nil { + failurePercentageRequestVolume = uint32p(fprv.GetValue()) + } + + // "if the enforcing_success_rate field is set to 0, the config + // success_rate_ejection field will be null and all success_rate_* fields + // will be ignored." - A50 + var sre *successRateEjection + if enforcingSuccessRate == nil || *enforcingSuccessRate != 0 { + sre = &successRateEjection{ + StdevFactor: successRateStdevFactor, + EnforcementPercentage: enforcingSuccessRate, + MinimumHosts: successRateMinimumHosts, + RequestVolume: successRateRequestVolume, + } + } + + // "If the enforcing_failure_percent field is set to 0 or null, the config + // failure_percent_ejection field will be null and all failure_percent_* + // fields will be ignored." 
- A50 + var fpe *failurePercentageEjection + if enforcingFailurePercentage != nil && *enforcingFailurePercentage != 0 { + fpe = &failurePercentageEjection{ + Threshold: failurePercentageThreshold, + EnforcementPercentage: enforcingFailurePercentage, + MinimumHosts: failurePercentageMinimumHosts, + RequestVolume: failurePercentageRequestVolume, + } + } + + odLBCfg := &odLBConfig{ + Interval: interval, + BaseEjectionTime: baseEjectionTime, + MaxEjectionTime: maxEjectionTime, + MaxEjectionPercent: maxEjectionPercent, + SuccessRateEjection: sre, + FailurePercentageEjection: fpe, + } + return json.Marshal(odLBCfg) +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go new file mode 100644 index 000000000000..67f0f7896b26 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_cds_test.go @@ -0,0 +1,1561 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "encoding/json" + "regexp" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + + v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3aggregateclusterpb "github.com/envoyproxy/go-control-plane/envoy/extensions/clusters/aggregate/v3" + v3leastrequestpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/least_request/v3" + v3ringhashpb "github.com/envoyproxy/go-control-plane/envoy/extensions/load_balancing_policies/ring_hash/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + anypb "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +const ( + clusterName = "clusterName" + serviceName = "service" +) + +var emptyUpdate = ClusterUpdate{ClusterName: clusterName, LRSServerConfig: ClusterLRSOff} + +func (s) TestValidateCluster_Failure(t *testing.T) { + oldCustomLBSupport := envconfig.XDSCustomLBPolicy + envconfig.XDSCustomLBPolicy = true + defer func() { + envconfig.XDSCustomLBPolicy = oldCustomLBSupport + }() + tests := []struct { + name string + cluster 
*v3clusterpb.Cluster + wantUpdate ClusterUpdate + wantErr bool + }{ + { + name: "non-supported-cluster-type-static", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }, + LbPolicy: v3clusterpb.Cluster_LEAST_REQUEST, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "non-supported-cluster-type-original-dst", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_ORIGINAL_DST}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }, + LbPolicy: v3clusterpb.Cluster_LEAST_REQUEST, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "no-eds-config", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "no-ads-config-source", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "non-round-robin-or-ring-hash-lb-policy", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }, + LbPolicy: 
v3clusterpb.Cluster_LEAST_REQUEST, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "logical-dns-multiple-localities", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_LOGICAL_DNS}, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LoadAssignment: &v3endpointpb.ClusterLoadAssignment{ + Endpoints: []*v3endpointpb.LocalityLbEndpoints{ + // Invalid if there are more than one locality. + {LbEndpoints: nil}, + {LbEndpoints: nil}, + }, + }, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "ring-hash-hash-function-not-xx-hash", + cluster: &v3clusterpb.Cluster{ + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + HashFunction: v3clusterpb.Cluster_RingHashLbConfig_MURMUR_HASH_2, + }, + }, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "ring-hash-max-bound-greater-than-upper-bound", + cluster: &v3clusterpb.Cluster{ + LbPolicy: v3clusterpb.Cluster_RING_HASH, + LbConfig: &v3clusterpb.Cluster_RingHashLbConfig_{ + RingHashLbConfig: &v3clusterpb.Cluster_RingHashLbConfig{ + MaximumRingSize: wrapperspb.UInt64(ringHashSizeUpperBound + 1), + }, + }, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "ring-hash-max-bound-greater-than-upper-bound-load-balancing-policy", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: 
testutils.MarshalAny(&v3ringhashpb.RingHash{ + HashFunction: v3ringhashpb.RingHash_XX_HASH, + MinimumRingSize: wrapperspb.UInt64(10), + MaximumRingSize: wrapperspb.UInt64(ringHashSizeUpperBound + 1), + }), + }, + }, + }, + }, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "least-request-unsupported-in-converter", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LoadBalancingPolicy: &v3clusterpb.LoadBalancingPolicy{ + Policies: []*v3clusterpb.LoadBalancingPolicy_Policy{ + { + TypedExtensionConfig: &v3corepb.TypedExtensionConfig{ + TypedConfig: testutils.MarshalAny(&v3leastrequestpb.LeastRequest{}), + }, + }, + }, + }, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "aggregate-nil-clusters", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{}), + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + { + name: "aggregate-empty-clusters", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_ClusterType{ + ClusterType: &v3clusterpb.Cluster_CustomClusterType{ + Name: "envoy.clusters.aggregate", + TypedConfig: testutils.MarshalAny(&v3aggregateclusterpb.ClusterConfig{ + Clusters: []string{}, + }), + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + }, + wantUpdate: emptyUpdate, + wantErr: true, + }, + } + + oldAggregateAndDNSSupportEnv := envconfig.XDSAggregateAndDNS + 
envconfig.XDSAggregateAndDNS = true + defer func() { envconfig.XDSAggregateAndDNS = oldAggregateAndDNSSupportEnv }() + oldRingHashSupport := envconfig.XDSRingHash + envconfig.XDSRingHash = true + defer func() { envconfig.XDSRingHash = oldRingHashSupport }() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if update, err := validateClusterAndConstructClusterUpdate(test.cluster); err == nil { + t.Errorf("validateClusterAndConstructClusterUpdate(%+v) = %v, wanted error", test.cluster, update) + } + }) + } +} + +func (s) TestValidateClusterWithSecurityConfig_EnvVarOff(t *testing.T) { + // Turn off the env var protection for client-side security. + origClientSideSecurityEnvVar := envconfig.XDSClientSideSecurity + envconfig.XDSClientSideSecurity = false + defer func() { envconfig.XDSClientSideSecurity = origClientSideSecurityEnvVar }() + + cluster := &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "rootInstance", + CertificateName: "rootCert", + }, + }, + }, + }), + }, + }, + } + wantUpdate := ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, + } + gotUpdate, err := 
validateClusterAndConstructClusterUpdate(cluster) + if err != nil { + t.Errorf("validateClusterAndConstructClusterUpdate() failed: %v", err) + } + if diff := cmp.Diff(wantUpdate, gotUpdate, cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicy")); diff != "" { + t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, got):\n%s", diff) + } +} + +func (s) TestSecurityConfigFromCommonTLSContextUsingNewFields_ErrorCases(t *testing.T) { + tests := []struct { + name string + common *v3tlspb.CommonTlsContext + server bool + wantErr string + }{ + { + name: "unsupported-tls_certificates-field-for-identity-certs", + common: &v3tlspb.CommonTlsContext{ + TlsCertificates: []*v3tlspb.TlsCertificate{ + {CertificateChain: &v3corepb.DataSource{}}, + }, + }, + wantErr: "unsupported field tls_certificates is set in CommonTlsContext message", + }, + { + name: "unsupported-tls_certificates_sds_secret_configs-field-for-identity-certs", + common: &v3tlspb.CommonTlsContext{ + TlsCertificateSdsSecretConfigs: []*v3tlspb.SdsSecretConfig{ + {Name: "sds-secrets-config"}, + }, + }, + wantErr: "unsupported field tls_certificate_sds_secret_configs is set in CommonTlsContext message", + }, + { + name: "unsupported-sds-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ + ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ + Name: "foo-sds-secret", + }, + }, + }, + wantErr: "validation context contains unexpected type", + }, + { + name: "missing-ca_certificate_provider_instance-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{}, + }, + }, + wantErr: "expected field ca_certificate_provider_instance is missing in CommonTlsContext message", + }, + { + name: "unsupported-field-verify_certificate_spki-in-validation-context", + common: 
&v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + VerifyCertificateSpki: []string{"spki"}, + }, + }, + }, + wantErr: "unsupported verify_certificate_spki field in CommonTlsContext message", + }, + { + name: "unsupported-field-verify_certificate_hash-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + VerifyCertificateHash: []string{"hash"}, + }, + }, + }, + wantErr: "unsupported verify_certificate_hash field in CommonTlsContext message", + }, + { + name: "unsupported-field-require_signed_certificate_timestamp-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + RequireSignedCertificateTimestamp: &wrapperspb.BoolValue{Value: true}, + }, + }, + }, + wantErr: "unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message", + }, + { + name: "unsupported-field-crl-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + Crl: 
&v3corepb.DataSource{}, + }, + }, + }, + wantErr: "unsupported crl field in CommonTlsContext message", + }, + { + name: "unsupported-field-custom_validator_config-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + CustomValidatorConfig: &v3corepb.TypedExtensionConfig{}, + }, + }, + }, + wantErr: "unsupported custom_validator_config field in CommonTlsContext message", + }, + { + name: "invalid-match_subject_alt_names-field-in-validation-context", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: ""}}, + }, + }, + }, + }, + wantErr: "empty prefix is not allowed in StringMatcher", + }, + { + name: "unsupported-field-matching-subject-alt-names-in-validation-context-of-server", + common: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: "sanPrefix"}}, + }, + }, + }, + }, + server: true, + wantErr: "match_subject_alt_names field in validation context is not supported on the server", + }, + } + + for _, test := range tests { + 
t.Run(test.name, func(t *testing.T) { + _, err := securityConfigFromCommonTLSContextUsingNewFields(test.common, test.server) + if err == nil { + t.Fatal("securityConfigFromCommonTLSContextUsingNewFields() succeeded when expected to fail") + } + if !strings.Contains(err.Error(), test.wantErr) { + t.Fatalf("securityConfigFromCommonTLSContextUsingNewFields() returned err: %v, wantErr: %v", err, test.wantErr) + } + }) + } +} + +func (s) TestValidateClusterWithSecurityConfig(t *testing.T) { + const ( + identityPluginInstance = "identityPluginInstance" + identityCertName = "identityCert" + rootPluginInstance = "rootPluginInstance" + rootCertName = "rootCert" + clusterName = "cluster" + serviceName = "service" + sanExact = "san-exact" + sanPrefix = "san-prefix" + sanSuffix = "san-suffix" + sanRegexBad = "??" + sanRegexGood = "san?regex?" + sanContains = "san-contains" + ) + var sanRE = regexp.MustCompile(sanRegexGood) + + tests := []struct { + name string + cluster *v3clusterpb.Cluster + wantUpdate ClusterUpdate + wantErr bool + }{ + { + name: "transport-socket-matches", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocketMatches: []*v3clusterpb.Cluster_TransportSocketMatch{ + {Name: "transport-socket-match-1"}, + }, + }, + wantErr: true, + }, + { + name: "transport-socket-unsupported-name", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: 
serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "unsupported-foo", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: &anypb.Any{ + TypeUrl: version.V3UpstreamTLSContextURL, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "transport-socket-unsupported-typeURL", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: &anypb.Any{ + TypeUrl: version.V3HTTPConnManagerURL, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "transport-socket-unsupported-type", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: &anypb.Any{ + TypeUrl: version.V3UpstreamTLSContextURL, + Value: []byte{1, 2, 3, 4}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "transport-socket-unsupported-tls-params-field", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: 
&v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsParams: &v3tlspb.TlsParameters{}, + }, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "transport-socket-unsupported-custom-handshaker-field", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + CustomHandshaker: &v3corepb.TypedExtensionConfig{}, + }, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "transport-socket-unsupported-validation-context", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ + ValidationContextSdsSecretConfig: 
&v3tlspb.SdsSecretConfig{ + Name: "foo-sds-secret", + }, + }, + }, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "transport-socket-without-validation-context", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{}, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "empty-prefix-in-matching-SAN", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: ""}}, + }, + }, + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: 
rootCertName, + }, + }, + }, + }, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "empty-suffix-in-matching-SAN", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: ""}}, + }, + }, + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "empty-contains-in-matching-SAN", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + 
ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_Contains{Contains: ""}}, + }, + }, + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "invalid-regex-in-matching-SAN", + cluster: &v3clusterpb.Cluster{ + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexBad}}}, + }, + }, + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "invalid-regex-in-matching-SAN-with-new-fields", + cluster: &v3clusterpb.Cluster{ 
+ ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexBad}}}, + }, + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }, + }), + }, + }, + }, + wantErr: true, + }, + { + name: "happy-case-with-no-identity-certs-using-deprecated-fields", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: 
&v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }), + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, + SecurityCfg: &SecurityConfig{ + RootInstanceName: rootPluginInstance, + RootCertName: rootCertName, + }, + }, + }, + { + name: "happy-case-with-no-identity-certs-using-new-fields", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }), + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, + SecurityCfg: &SecurityConfig{ + RootInstanceName: rootPluginInstance, + RootCertName: rootCertName, + }, + }, + }, + { + name: "happy-case-with-validation-context-provider-instance-using-deprecated-fields", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: 
&v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: identityPluginInstance, + CertificateName: identityCertName, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }), + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, + SecurityCfg: &SecurityConfig{ + RootInstanceName: rootPluginInstance, + RootCertName: rootCertName, + IdentityInstanceName: identityPluginInstance, + IdentityCertName: identityCertName, + }, + }, + }, + { + name: "happy-case-with-validation-context-provider-instance-using-new-fields", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: 
&v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: identityPluginInstance, + CertificateName: identityCertName, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }), + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, + SecurityCfg: &SecurityConfig{ + RootInstanceName: rootPluginInstance, + RootCertName: rootCertName, + IdentityInstanceName: identityPluginInstance, + IdentityCertName: identityCertName, + }, + }, + }, + { + name: "happy-case-with-combined-validation-context-using-deprecated-fields", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: identityPluginInstance, + CertificateName: identityCertName, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: 
&v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + { + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: sanExact}, + IgnoreCase: true, + }, + {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: sanPrefix}}, + {MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: sanSuffix}}, + {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexGood}}}, + {MatchPattern: &v3matcherpb.StringMatcher_Contains{Contains: sanContains}}, + }, + }, + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }), + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, + SecurityCfg: &SecurityConfig{ + RootInstanceName: rootPluginInstance, + RootCertName: rootCertName, + IdentityInstanceName: identityPluginInstance, + IdentityCertName: identityCertName, + SubjectAltNameMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP(sanExact), nil, nil, nil, nil, true), + matcher.StringMatcherForTesting(nil, newStringP(sanPrefix), nil, nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, newStringP(sanSuffix), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, sanRE, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP(sanContains), nil, false), + }, + }, + }, + }, + { + name: "happy-case-with-combined-validation-context-using-new-fields", + cluster: &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: 
&v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: serviceName, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: identityPluginInstance, + CertificateName: identityCertName, + }, + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + MatchSubjectAltNames: []*v3matcherpb.StringMatcher{ + { + MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: sanExact}, + IgnoreCase: true, + }, + {MatchPattern: &v3matcherpb.StringMatcher_Prefix{Prefix: sanPrefix}}, + {MatchPattern: &v3matcherpb.StringMatcher_Suffix{Suffix: sanSuffix}}, + {MatchPattern: &v3matcherpb.StringMatcher_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: sanRegexGood}}}, + {MatchPattern: &v3matcherpb.StringMatcher_Contains{Contains: sanContains}}, + }, + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: rootPluginInstance, + CertificateName: rootCertName, + }, + }, + }, + }, + }, + }), + }, + }, + }, + wantUpdate: ClusterUpdate{ + ClusterName: clusterName, + EDSServiceName: serviceName, + LRSServerConfig: ClusterLRSOff, + SecurityCfg: &SecurityConfig{ + RootInstanceName: rootPluginInstance, + RootCertName: rootCertName, + IdentityInstanceName: identityPluginInstance, + IdentityCertName: identityCertName, + SubjectAltNameMatchers: []matcher.StringMatcher{ + matcher.StringMatcherForTesting(newStringP(sanExact), nil, nil, nil, nil, true), + matcher.StringMatcherForTesting(nil, newStringP(sanPrefix), nil, nil, nil, false), + 
matcher.StringMatcherForTesting(nil, nil, newStringP(sanSuffix), nil, nil, false), + matcher.StringMatcherForTesting(nil, nil, nil, nil, sanRE, false), + matcher.StringMatcherForTesting(nil, nil, nil, newStringP(sanContains), nil, false), + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + update, err := validateClusterAndConstructClusterUpdate(test.cluster) + if (err != nil) != test.wantErr { + t.Errorf("validateClusterAndConstructClusterUpdate() returned err %v wantErr %v)", err, test.wantErr) + } + if diff := cmp.Diff(test.wantUpdate, update, cmpopts.EquateEmpty(), cmp.AllowUnexported(regexp.Regexp{}), cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicy")); diff != "" { + t.Errorf("validateClusterAndConstructClusterUpdate() returned unexpected diff (-want, +got):\n%s", diff) + } + }) + } +} + +func (s) TestUnmarshalCluster(t *testing.T) { + const ( + v3ClusterName = "v3clusterName" + v3Service = "v3Service" + ) + var ( + v3ClusterAny = testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: v3ClusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: v3Service, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }) + + v3ClusterAnyWithEDSConfigSourceSelf = testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: v3ClusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{}, + }, + ServiceName: v3Service, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + 
LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{ + Self: &v3corepb.SelfConfigSource{}, + }, + }, + }) + ) + + tests := []struct { + name string + resource *anypb.Any + wantName string + wantUpdate ClusterUpdate + wantErr bool + }{ + { + name: "non-cluster resource type", + resource: &anypb.Any{TypeUrl: version.V3HTTPConnManagerURL}, + wantErr: true, + }, + { + name: "badly marshaled cluster resource", + resource: &anypb.Any{ + TypeUrl: version.V3ClusterURL, + Value: []byte{1, 2, 3, 4}, + }, + wantErr: true, + }, + { + name: "bad cluster resource", + resource: testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: "test", + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_STATIC}, + }), + wantName: "test", + wantErr: true, + }, + { + name: "cluster resource with non-self lrs_server field", + resource: testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: "test", + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: v3Service, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + LrsServer: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }), + wantName: "test", + wantErr: true, + }, + { + name: "v3 cluster", + resource: v3ClusterAny, + wantName: v3ClusterName, + wantUpdate: ClusterUpdate{ + ClusterName: v3ClusterName, + EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, + Raw: v3ClusterAny, + }, + }, + { + name: "v3 cluster wrapped", + resource: testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3ClusterAny}), + wantName: v3ClusterName, + wantUpdate: ClusterUpdate{ + ClusterName: v3ClusterName, + EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, + Raw: 
v3ClusterAny, + }, + }, + { + name: "v3 cluster with EDS config source self", + resource: v3ClusterAnyWithEDSConfigSourceSelf, + wantName: v3ClusterName, + wantUpdate: ClusterUpdate{ + ClusterName: v3ClusterName, + EDSServiceName: v3Service, LRSServerConfig: ClusterLRSServerSelf, + Raw: v3ClusterAnyWithEDSConfigSourceSelf, + }, + }, + { + name: "xdstp cluster resource with unset EDS service name", + resource: testutils.MarshalAny(&v3clusterpb.Cluster{ + Name: "xdstp:foo", + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + ServiceName: "", + }, + }), + wantName: "xdstp:foo", + wantErr: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + name, update, err := unmarshalClusterResource(test.resource) + if (err != nil) != test.wantErr { + t.Fatalf("unmarshalClusterResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) + } + if name != test.wantName { + t.Errorf("unmarshalClusterResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) + } + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts, cmpopts.IgnoreFields(ClusterUpdate{}, "LBPolicy")); diff != "" { + t.Errorf("unmarshalClusterResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) + } + }) + } +} + +func (s) TestValidateClusterWithOutlierDetection(t *testing.T) { + odToClusterProto := func(od *v3clusterpb.OutlierDetection) *v3clusterpb.Cluster { + // Cluster parsing doesn't fail with respect to fields orthogonal to + // outlier detection. 
+ return &v3clusterpb.Cluster{ + Name: clusterName, + ClusterDiscoveryType: &v3clusterpb.Cluster_Type{Type: v3clusterpb.Cluster_EDS}, + EdsClusterConfig: &v3clusterpb.Cluster_EdsClusterConfig{ + EdsConfig: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{ + Ads: &v3corepb.AggregatedConfigSource{}, + }, + }, + }, + LbPolicy: v3clusterpb.Cluster_ROUND_ROBIN, + OutlierDetection: od, + } + } + + tests := []struct { + name string + cluster *v3clusterpb.Cluster + wantODCfg string + wantErr bool + }{ + { + name: "success-and-failure-null", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{}), + wantODCfg: `{"successRateEjection": {}}`, + }, + { + name: "success-and-failure-zero", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{ + EnforcingSuccessRate: &wrapperspb.UInt32Value{Value: 0}, // Thus doesn't create sre - to focus on fpe + EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 0}, + }), + wantODCfg: `{}`, + }, + { + name: "some-fields-set", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{ + Interval: &durationpb.Duration{Seconds: 1}, + MaxEjectionTime: &durationpb.Duration{Seconds: 3}, + EnforcingSuccessRate: &wrapperspb.UInt32Value{Value: 3}, + SuccessRateRequestVolume: &wrapperspb.UInt32Value{Value: 5}, + EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 7}, + FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 9}, + }), + wantODCfg: `{ + "interval": "1s", + "maxEjectionTime": "3s", + "successRateEjection": { + "enforcementPercentage": 3, + "requestVolume": 5 + }, + "failurePercentageEjection": { + "enforcementPercentage": 7, + "requestVolume": 9 + } + }`, + }, + { + name: "every-field-set-non-zero", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{ + // all fields set (including ones that will be layered) should + // pick up those too and explicitly all fields, including those + // put in layers, in the JSON generated. 
+ Interval: &durationpb.Duration{Seconds: 1}, + BaseEjectionTime: &durationpb.Duration{Seconds: 2}, + MaxEjectionTime: &durationpb.Duration{Seconds: 3}, + MaxEjectionPercent: &wrapperspb.UInt32Value{Value: 1}, + SuccessRateStdevFactor: &wrapperspb.UInt32Value{Value: 2}, + EnforcingSuccessRate: &wrapperspb.UInt32Value{Value: 3}, + SuccessRateMinimumHosts: &wrapperspb.UInt32Value{Value: 4}, + SuccessRateRequestVolume: &wrapperspb.UInt32Value{Value: 5}, + FailurePercentageThreshold: &wrapperspb.UInt32Value{Value: 6}, + EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 7}, + FailurePercentageMinimumHosts: &wrapperspb.UInt32Value{Value: 8}, + FailurePercentageRequestVolume: &wrapperspb.UInt32Value{Value: 9}, + }), + wantODCfg: `{ + "interval": "1s", + "baseEjectionTime": "2s", + "maxEjectionTime": "3s", + "maxEjectionPercent": 1, + "successRateEjection": { + "stdevFactor": 2, + "enforcementPercentage": 3, + "minimumHosts": 4, + "requestVolume": 5 + }, + "failurePercentageEjection": { + "threshold": 6, + "enforcementPercentage": 7, + "minimumHosts": 8, + "requestVolume": 9 + } + }`, + }, + { + name: "interval-is-negative", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{Interval: &durationpb.Duration{Seconds: -10}}), + wantErr: true, + }, + { + name: "interval-overflows", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{Interval: &durationpb.Duration{Seconds: 315576000001}}), + wantErr: true, + }, + { + name: "base-ejection-time-is-negative", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{BaseEjectionTime: &durationpb.Duration{Seconds: -10}}), + wantErr: true, + }, + { + name: "base-ejection-time-overflows", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{BaseEjectionTime: &durationpb.Duration{Seconds: 315576000001}}), + wantErr: true, + }, + { + name: "max-ejection-time-is-negative", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{MaxEjectionTime: &durationpb.Duration{Seconds: -10}}), + wantErr: true, + }, + { + 
name: "max-ejection-time-overflows", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{MaxEjectionTime: &durationpb.Duration{Seconds: 315576000001}}), + wantErr: true, + }, + { + name: "max-ejection-percent-is-greater-than-100", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{MaxEjectionPercent: &wrapperspb.UInt32Value{Value: 150}}), + wantErr: true, + }, + { + name: "enforcing-success-rate-is-greater-than-100", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{EnforcingSuccessRate: &wrapperspb.UInt32Value{Value: 150}}), + wantErr: true, + }, + { + name: "failure-percentage-threshold-is-greater-than-100", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{FailurePercentageThreshold: &wrapperspb.UInt32Value{Value: 150}}), + wantErr: true, + }, + { + name: "enforcing-failure-percentage-is-greater-than-100", + cluster: odToClusterProto(&v3clusterpb.OutlierDetection{EnforcingFailurePercentage: &wrapperspb.UInt32Value{Value: 150}}), + wantErr: true, + }, + // A Outlier Detection proto not present should lead to a nil + // OutlierDetection field in the ClusterUpdate, which is implicitly + // tested in every other test in this file. + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + update, err := validateClusterAndConstructClusterUpdate(test.cluster) + if (err != nil) != test.wantErr { + t.Errorf("validateClusterAndConstructClusterUpdate() returned err %v wantErr %v)", err, test.wantErr) + } + if test.wantErr { + return + } + // got and want must be unmarshalled since JSON strings shouldn't + // generally be directly compared. 
+ var got map[string]interface{} + if err := json.Unmarshal(update.OutlierDetection, &got); err != nil { + t.Fatalf("Error unmarshalling update.OutlierDetection (%q): %v", update.OutlierDetection, err) + } + var want map[string]interface{} + if err := json.Unmarshal(json.RawMessage(test.wantODCfg), &want); err != nil { + t.Fatalf("Error unmarshalling wantODCfg (%q): %v", test.wantODCfg, err) + } + if diff := cmp.Diff(got, want); diff != "" { + t.Fatalf("cluster.OutlierDetection got unexpected output, diff (-got, +want): %v", diff) + } + }) + } +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go new file mode 100644 index 000000000000..95333aaf61d5 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "fmt" + "math" + "net" + "strconv" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/xds/internal" + "google.golang.org/protobuf/types/known/anypb" +) + +func unmarshalEndpointsResource(r *anypb.Any) (string, EndpointsUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", EndpointsUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsEndpointsResource(r.GetTypeUrl()) { + return "", EndpointsUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + + cla := &v3endpointpb.ClusterLoadAssignment{} + if err := proto.Unmarshal(r.GetValue(), cla); err != nil { + return "", EndpointsUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + u, err := parseEDSRespProto(cla) + if err != nil { + return cla.GetClusterName(), EndpointsUpdate{}, err + } + u.Raw = r + return cla.GetClusterName(), u, nil +} + +func parseAddress(socketAddress *v3corepb.SocketAddress) string { + return net.JoinHostPort(socketAddress.GetAddress(), strconv.Itoa(int(socketAddress.GetPortValue()))) +} + +func parseDropPolicy(dropPolicy *v3endpointpb.ClusterLoadAssignment_Policy_DropOverload) OverloadDropConfig { + percentage := dropPolicy.GetDropPercentage() + var ( + numerator = percentage.GetNumerator() + denominator uint32 + ) + switch percentage.GetDenominator() { + case v3typepb.FractionalPercent_HUNDRED: + denominator = 100 + case v3typepb.FractionalPercent_TEN_THOUSAND: + denominator = 10000 + case v3typepb.FractionalPercent_MILLION: + denominator = 1000000 + } + return OverloadDropConfig{ + Category: dropPolicy.GetCategory(), + Numerator: numerator, + 
Denominator: denominator, + } +} + +func parseEndpoints(lbEndpoints []*v3endpointpb.LbEndpoint, uniqueEndpointAddrs map[string]bool) ([]Endpoint, error) { + endpoints := make([]Endpoint, 0, len(lbEndpoints)) + for _, lbEndpoint := range lbEndpoints { + // If the load_balancing_weight field is specified, it must be set to a + // value of at least 1. If unspecified, each host is presumed to have + // equal weight in a locality. + weight := uint32(1) + if w := lbEndpoint.GetLoadBalancingWeight(); w != nil { + if w.GetValue() == 0 { + return nil, fmt.Errorf("EDS response contains an endpoint with zero weight: %+v", lbEndpoint) + } + weight = w.GetValue() + } + addr := parseAddress(lbEndpoint.GetEndpoint().GetAddress().GetSocketAddress()) + if uniqueEndpointAddrs[addr] { + return nil, fmt.Errorf("duplicate endpoint with the same address %s", addr) + } + uniqueEndpointAddrs[addr] = true + endpoints = append(endpoints, Endpoint{ + HealthStatus: EndpointHealthStatus(lbEndpoint.GetHealthStatus()), + Address: addr, + Weight: weight, + }) + } + return endpoints, nil +} + +func parseEDSRespProto(m *v3endpointpb.ClusterLoadAssignment) (EndpointsUpdate, error) { + ret := EndpointsUpdate{} + for _, dropPolicy := range m.GetPolicy().GetDropOverloads() { + ret.Drops = append(ret.Drops, parseDropPolicy(dropPolicy)) + } + priorities := make(map[uint32]map[string]bool) + sumOfWeights := make(map[uint32]uint64) + uniqueEndpointAddrs := make(map[string]bool) + for _, locality := range m.Endpoints { + l := locality.GetLocality() + if l == nil { + return EndpointsUpdate{}, fmt.Errorf("EDS response contains a locality without ID, locality: %+v", locality) + } + weight := locality.GetLoadBalancingWeight().GetValue() + if weight == 0 { + logger.Warningf("Ignoring locality %s with weight 0", pretty.ToJSON(l)) + continue + } + priority := locality.GetPriority() + sumOfWeights[priority] += uint64(weight) + if sumOfWeights[priority] > math.MaxUint32 { + return EndpointsUpdate{}, fmt.Errorf("sum 
of weights of localities at the same priority %d exceeded maximal value", priority) + } + localitiesWithPriority := priorities[priority] + if localitiesWithPriority == nil { + localitiesWithPriority = make(map[string]bool) + priorities[priority] = localitiesWithPriority + } + lid := internal.LocalityID{ + Region: l.Region, + Zone: l.Zone, + SubZone: l.SubZone, + } + lidStr, _ := lid.ToString() + + // "Since an xDS configuration can place a given locality under multiple + // priorities, it is possible to see locality weight attributes with + // different values for the same locality." - A52 + // + // This is handled in the client by emitting the locality weight + // specified for the priority it is specified in. If the same locality + // has a different weight in two priorities, each priority will specify + // a locality with the locality weight specified for that priority, and + // thus the subsequent tree of balancers linked to that priority will + // use that locality weight as well. + if localitiesWithPriority[lidStr] { + return EndpointsUpdate{}, fmt.Errorf("duplicate locality %s with the same priority %v", lidStr, priority) + } + localitiesWithPriority[lidStr] = true + endpoints, err := parseEndpoints(locality.GetLbEndpoints(), uniqueEndpointAddrs) + if err != nil { + return EndpointsUpdate{}, err + } + ret.Localities = append(ret.Localities, Locality{ + ID: lid, + Endpoints: endpoints, + Weight: weight, + Priority: priority, + }) + } + for i := 0; i < len(priorities); i++ { + if _, ok := priorities[uint32(i)]; !ok { + return EndpointsUpdate{}, fmt.Errorf("priority %v missing (with different priorities %v received)", i, priorities) + } + } + return ret, nil +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go new file mode 100644 index 000000000000..cfb452b26fad --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_eds_test.go @@ -0,0 +1,437 @@ +/* + * + * Copyright 2021 
gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "fmt" + "net" + "strconv" + "testing" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + anypb "github.com/golang/protobuf/ptypes/any" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/xds/internal" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" +) + +func (s) TestEDSParseRespProto(t *testing.T) { + tests := []struct { + name string + m *v3endpointpb.ClusterLoadAssignment + want EndpointsUpdate + wantErr bool + }{ + { + name: "missing-priority", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) + clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) + return clab0.Build() + }(), + want: EndpointsUpdate{}, + wantErr: true, + }, + { + name: "missing-locality-ID", + m: func() *v3endpointpb.ClusterLoadAssignment { + 
clab0 := newClaBuilder("test", nil) + clab0.addLocality("", 1, 0, []string{"addr1:314"}, nil) + return clab0.Build() + }(), + want: EndpointsUpdate{}, + wantErr: true, + }, + { + name: "zero-endpoint-weight", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-0", 1, 0, []string{"addr1:314"}, &addLocalityOptions{Weight: []uint32{0}}) + return clab0.Build() + }(), + want: EndpointsUpdate{}, + wantErr: true, + }, + { + name: "duplicate-locality-in-the-same-priority", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-0", 1, 0, []string{"addr1:314"}, nil) + clab0.addLocality("locality-0", 1, 0, []string{"addr1:314"}, nil) // Duplicate locality with the same priority. + return clab0.Build() + }(), + want: EndpointsUpdate{}, + wantErr: true, + }, + { + name: "missing locality weight", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 0, 1, []string{"addr1:314"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_HEALTHY}, + }) + clab0.addLocality("locality-2", 0, 0, []string{"addr2:159"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_HEALTHY}, + }) + return clab0.Build() + }(), + want: EndpointsUpdate{}, + }, + { + name: "max sum of weights at the same priority exceeded", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) + clab0.addLocality("locality-2", 4294967295, 1, []string{"addr2:159"}, nil) + clab0.addLocality("locality-3", 1, 1, []string{"addr2:88"}, nil) + return clab0.Build() + }(), + want: EndpointsUpdate{}, + wantErr: true, + }, + { + name: "duplicate endpoint address", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 1, 
[]string{"addr:997"}, nil) + clab0.addLocality("locality-2", 1, 0, []string{"addr:997"}, nil) + return clab0.Build() + }(), + want: EndpointsUpdate{}, + wantErr: true, + }, + { + name: "good", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 1, []string{"addr1:314"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_UNHEALTHY}, + Weight: []uint32{271}, + }) + clab0.addLocality("locality-2", 1, 0, []string{"addr2:159"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_DRAINING}, + Weight: []uint32{828}, + }) + return clab0.Build() + }(), + want: EndpointsUpdate{ + Drops: nil, + Localities: []Locality{ + { + Endpoints: []Endpoint{{ + Address: "addr1:314", + HealthStatus: EndpointHealthStatusUnhealthy, + Weight: 271, + }}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []Endpoint{{ + Address: "addr2:159", + HealthStatus: EndpointHealthStatusDraining, + Weight: 828, + }}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, + }, + }, + }, + wantErr: false, + }, + { + name: "good duplicate locality with different priority", + m: func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 1, []string{"addr1:314"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_UNHEALTHY}, + Weight: []uint32{271}, + }) + // Same locality name, but with different priority. 
+ clab0.addLocality("locality-1", 1, 0, []string{"addr2:159"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_DRAINING}, + Weight: []uint32{828}, + }) + return clab0.Build() + }(), + want: EndpointsUpdate{ + Drops: nil, + Localities: []Locality{ + { + Endpoints: []Endpoint{{ + Address: "addr1:314", + HealthStatus: EndpointHealthStatusUnhealthy, + Weight: 271, + }}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []Endpoint{{ + Address: "addr2:159", + HealthStatus: EndpointHealthStatusDraining, + Weight: 828, + }}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 0, + Weight: 1, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseEDSRespProto(tt.m) + if (err != nil) != tt.wantErr { + t.Errorf("parseEDSRespProto() error = %v, wantErr %v", err, tt.wantErr) + return + } + if d := cmp.Diff(got, tt.want); d != "" { + t.Errorf("parseEDSRespProto() got = %v, want %v, diff: %v", got, tt.want, d) + } + }) + } +} + +func (s) TestUnmarshalEndpoints(t *testing.T) { + var v3EndpointsAny = testutils.MarshalAny(func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 1, []string{"addr1:314"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_UNHEALTHY}, + Weight: []uint32{271}, + }) + clab0.addLocality("locality-2", 1, 0, []string{"addr2:159"}, &addLocalityOptions{ + Health: []v3corepb.HealthStatus{v3corepb.HealthStatus_DRAINING}, + Weight: []uint32{828}, + }) + return clab0.Build() + }()) + + tests := []struct { + name string + resource *anypb.Any + wantName string + wantUpdate EndpointsUpdate + wantErr bool + }{ + { + name: "non-clusterLoadAssignment resource type", + resource: &anypb.Any{TypeUrl: version.V3HTTPConnManagerURL}, + wantErr: true, + }, + { + name: "badly marshaled clusterLoadAssignment resource", + resource: 
&anypb.Any{ + TypeUrl: version.V3EndpointsURL, + Value: []byte{1, 2, 3, 4}, + }, + wantErr: true, + }, + { + name: "bad endpoints resource", + resource: testutils.MarshalAny(func() *v3endpointpb.ClusterLoadAssignment { + clab0 := newClaBuilder("test", nil) + clab0.addLocality("locality-1", 1, 0, []string{"addr1:314"}, nil) + clab0.addLocality("locality-2", 1, 2, []string{"addr2:159"}, nil) + return clab0.Build() + }()), + wantName: "test", + wantErr: true, + }, + { + name: "v3 endpoints", + resource: v3EndpointsAny, + wantName: "test", + wantUpdate: EndpointsUpdate{ + Drops: nil, + Localities: []Locality{ + { + Endpoints: []Endpoint{{ + Address: "addr1:314", + HealthStatus: EndpointHealthStatusUnhealthy, + Weight: 271, + }}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []Endpoint{{ + Address: "addr2:159", + HealthStatus: EndpointHealthStatusDraining, + Weight: 828, + }}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, + }, + }, + Raw: v3EndpointsAny, + }, + }, + { + name: "v3 endpoints wrapped", + resource: testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3EndpointsAny}), + wantName: "test", + wantUpdate: EndpointsUpdate{ + Drops: nil, + Localities: []Locality{ + { + Endpoints: []Endpoint{{ + Address: "addr1:314", + HealthStatus: EndpointHealthStatusUnhealthy, + Weight: 271, + }}, + ID: internal.LocalityID{SubZone: "locality-1"}, + Priority: 1, + Weight: 1, + }, + { + Endpoints: []Endpoint{{ + Address: "addr2:159", + HealthStatus: EndpointHealthStatusDraining, + Weight: 828, + }}, + ID: internal.LocalityID{SubZone: "locality-2"}, + Priority: 0, + Weight: 1, + }, + }, + Raw: v3EndpointsAny, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + name, update, err := unmarshalEndpointsResource(test.resource) + if (err != nil) != test.wantErr { + t.Fatalf("unmarshalEndpointsResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), 
err, test.wantErr) + } + if name != test.wantName { + t.Errorf("unmarshalEndpointsResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) + } + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { + t.Errorf("unmarshalEndpointsResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) + } + }) + } +} + +// claBuilder builds a ClusterLoadAssignment, aka EDS +// response. +type claBuilder struct { + v *v3endpointpb.ClusterLoadAssignment +} + +// newClaBuilder creates a claBuilder. +func newClaBuilder(clusterName string, dropPercents []uint32) *claBuilder { + var drops []*v3endpointpb.ClusterLoadAssignment_Policy_DropOverload + for i, d := range dropPercents { + drops = append(drops, &v3endpointpb.ClusterLoadAssignment_Policy_DropOverload{ + Category: fmt.Sprintf("test-drop-%d", i), + DropPercentage: &v3typepb.FractionalPercent{ + Numerator: d, + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }) + } + + return &claBuilder{ + v: &v3endpointpb.ClusterLoadAssignment{ + ClusterName: clusterName, + Policy: &v3endpointpb.ClusterLoadAssignment_Policy{ + DropOverloads: drops, + }, + }, + } +} + +// addLocalityOptions contains options when adding locality to the builder. +type addLocalityOptions struct { + Health []v3corepb.HealthStatus + Weight []uint32 +} + +// addLocality adds a locality to the builder. 
+func (clab *claBuilder) addLocality(subzone string, weight uint32, priority uint32, addrsWithPort []string, opts *addLocalityOptions) { + var lbEndPoints []*v3endpointpb.LbEndpoint + for i, a := range addrsWithPort { + host, portStr, err := net.SplitHostPort(a) + if err != nil { + panic("failed to split " + a) + } + port, err := strconv.Atoi(portStr) + if err != nil { + panic("failed to atoi " + portStr) + } + + lbe := &v3endpointpb.LbEndpoint{ + HostIdentifier: &v3endpointpb.LbEndpoint_Endpoint{ + Endpoint: &v3endpointpb.Endpoint{ + Address: &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Protocol: v3corepb.SocketAddress_TCP, + Address: host, + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: uint32(port)}}}}}}, + } + if opts != nil { + if i < len(opts.Health) { + lbe.HealthStatus = opts.Health[i] + } + if i < len(opts.Weight) { + lbe.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: opts.Weight[i]} + } + } + lbEndPoints = append(lbEndPoints, lbe) + } + + var localityID *v3corepb.Locality + if subzone != "" { + localityID = &v3corepb.Locality{ + Region: "", + Zone: "", + SubZone: subzone, + } + } + + clab.v.Endpoints = append(clab.v.Endpoints, &v3endpointpb.LocalityLbEndpoints{ + Locality: localityID, + LbEndpoints: lbEndPoints, + LoadBalancingWeight: &wrapperspb.UInt32Value{Value: weight}, + Priority: priority, + }) +} + +// Build builds ClusterLoadAssignment. +func (clab *claBuilder) Build() *v3endpointpb.ClusterLoadAssignment { + return clab.v +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go new file mode 100644 index 000000000000..8f18b02e28a6 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "errors" + "fmt" + "strconv" + + v1udpaudpatypepb "github.com/cncf/udpa/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/protobuf/types/known/anypb" +) + +func unmarshalListenerResource(r *anypb.Any) (string, ListenerUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", ListenerUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsListenerResource(r.GetTypeUrl()) { + return "", ListenerUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + lis := &v3listenerpb.Listener{} + if err := proto.Unmarshal(r.GetValue(), lis); err != nil { + return "", ListenerUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + lu, err := processListener(lis) + if err != nil { + return lis.GetName(), ListenerUpdate{}, err + } + lu.Raw = r + return lis.GetName(), *lu, nil +} + +func processListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { + if lis.GetApiListener() != nil { + return processClientSideListener(lis) + 
} + return processServerSideListener(lis) +} + +// processClientSideListener checks if the provided Listener proto meets +// the expected criteria. If so, it returns a non-empty routeConfigName. +func processClientSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { + update := &ListenerUpdate{} + + apiLisAny := lis.GetApiListener().GetApiListener() + if !IsHTTPConnManagerResource(apiLisAny.GetTypeUrl()) { + return nil, fmt.Errorf("unexpected resource type: %q", apiLisAny.GetTypeUrl()) + } + apiLis := &v3httppb.HttpConnectionManager{} + if err := proto.Unmarshal(apiLisAny.GetValue(), apiLis); err != nil { + return nil, fmt.Errorf("failed to unmarshal api_listner: %v", err) + } + // "HttpConnectionManager.xff_num_trusted_hops must be unset or zero and + // HttpConnectionManager.original_ip_detection_extensions must be empty. If + // either field has an incorrect value, the Listener must be NACKed." - A41 + if apiLis.XffNumTrustedHops != 0 { + return nil, fmt.Errorf("xff_num_trusted_hops must be unset or zero %+v", apiLis) + } + if len(apiLis.OriginalIpDetectionExtensions) != 0 { + return nil, fmt.Errorf("original_ip_detection_extensions must be empty %+v", apiLis) + } + + switch apiLis.RouteSpecifier.(type) { + case *v3httppb.HttpConnectionManager_Rds: + if configsource := apiLis.GetRds().GetConfigSource(); configsource.GetAds() == nil && configsource.GetSelf() == nil { + return nil, fmt.Errorf("LDS's RDS configSource is not ADS or Self: %+v", lis) + } + name := apiLis.GetRds().GetRouteConfigName() + if name == "" { + return nil, fmt.Errorf("empty route_config_name: %+v", lis) + } + update.RouteConfigName = name + case *v3httppb.HttpConnectionManager_RouteConfig: + routeU, err := generateRDSUpdateFromRouteConfiguration(apiLis.GetRouteConfig()) + if err != nil { + return nil, fmt.Errorf("failed to parse inline RDS resp: %v", err) + } + update.InlineRouteConfig = &routeU + case nil: + return nil, fmt.Errorf("no RouteSpecifier: %+v", apiLis) + default: + 
return nil, fmt.Errorf("unsupported type %T for RouteSpecifier", apiLis.RouteSpecifier) + } + + // The following checks and fields only apply to xDS protocol versions v3+. + + update.MaxStreamDuration = apiLis.GetCommonHttpProtocolOptions().GetMaxStreamDuration().AsDuration() + + var err error + if update.HTTPFilters, err = processHTTPFilters(apiLis.GetHttpFilters(), false); err != nil { + return nil, err + } + + return update, nil +} + +func unwrapHTTPFilterConfig(config *anypb.Any) (proto.Message, string, error) { + switch { + case ptypes.Is(config, &v3xdsxdstypepb.TypedStruct{}): + // The real type name is inside the new TypedStruct message. + s := new(v3xdsxdstypepb.TypedStruct) + if err := ptypes.UnmarshalAny(config, s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + case ptypes.Is(config, &v1udpaudpatypepb.TypedStruct{}): + // The real type name is inside the old TypedStruct message. + s := new(v1udpaudpatypepb.TypedStruct) + if err := ptypes.UnmarshalAny(config, s); err != nil { + return nil, "", fmt.Errorf("error unmarshalling TypedStruct filter config: %v", err) + } + return s, s.GetTypeUrl(), nil + default: + return config, config.GetTypeUrl(), nil + } +} + +func validateHTTPFilterConfig(cfg *anypb.Any, lds, optional bool) (httpfilter.Filter, httpfilter.FilterConfig, error) { + config, typeURL, err := unwrapHTTPFilterConfig(cfg) + if err != nil { + return nil, nil, err + } + filterBuilder := httpfilter.Get(typeURL) + if filterBuilder == nil { + if optional { + return nil, nil, nil + } + return nil, nil, fmt.Errorf("no filter implementation found for %q", typeURL) + } + parseFunc := filterBuilder.ParseFilterConfig + if !lds { + parseFunc = filterBuilder.ParseFilterConfigOverride + } + filterConfig, err := parseFunc(config) + if err != nil { + return nil, nil, fmt.Errorf("error parsing config for filter %q: %v", typeURL, err) + } + return filterBuilder, filterConfig, 
nil +} + +func processHTTPFilterOverrides(cfgs map[string]*anypb.Any) (map[string]httpfilter.FilterConfig, error) { + if len(cfgs) == 0 { + return nil, nil + } + m := make(map[string]httpfilter.FilterConfig) + for name, cfg := range cfgs { + optional := false + s := new(v3routepb.FilterConfig) + if ptypes.Is(cfg, s) { + if err := ptypes.UnmarshalAny(cfg, s); err != nil { + return nil, fmt.Errorf("filter override %q: error unmarshalling FilterConfig: %v", name, err) + } + cfg = s.GetConfig() + optional = s.GetIsOptional() + } + + httpFilter, config, err := validateHTTPFilterConfig(cfg, false, optional) + if err != nil { + return nil, fmt.Errorf("filter override %q: %v", name, err) + } + if httpFilter == nil { + // Optional configs are ignored. + continue + } + m[name] = config + } + return m, nil +} + +func processHTTPFilters(filters []*v3httppb.HttpFilter, server bool) ([]HTTPFilter, error) { + ret := make([]HTTPFilter, 0, len(filters)) + seenNames := make(map[string]bool, len(filters)) + for _, filter := range filters { + name := filter.GetName() + if name == "" { + return nil, errors.New("filter missing name field") + } + if seenNames[name] { + return nil, fmt.Errorf("duplicate filter name %q", name) + } + seenNames[name] = true + + httpFilter, config, err := validateHTTPFilterConfig(filter.GetTypedConfig(), true, filter.GetIsOptional()) + if err != nil { + return nil, err + } + if httpFilter == nil { + // Optional configs are ignored. 
+ continue + } + if server { + if _, ok := httpFilter.(httpfilter.ServerInterceptorBuilder); !ok { + if filter.GetIsOptional() { + continue + } + return nil, fmt.Errorf("HTTP filter %q not supported server-side", name) + } + } else if _, ok := httpFilter.(httpfilter.ClientInterceptorBuilder); !ok { + if filter.GetIsOptional() { + continue + } + return nil, fmt.Errorf("HTTP filter %q not supported client-side", name) + } + + // Save name/config + ret = append(ret, HTTPFilter{Name: name, Filter: httpFilter, Config: config}) + } + // "Validation will fail if a terminal filter is not the last filter in the + // chain or if a non-terminal filter is the last filter in the chain." - A39 + if len(ret) == 0 { + return nil, fmt.Errorf("http filters list is empty") + } + var i int + for ; i < len(ret)-1; i++ { + if ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is a terminal filter but it is not last in the filter chain", ret[i].Name) + } + } + if !ret[i].Filter.IsTerminal() { + return nil, fmt.Errorf("http filter %q is not a terminal filter", ret[len(ret)-1].Name) + } + return ret, nil +} + +func processServerSideListener(lis *v3listenerpb.Listener) (*ListenerUpdate, error) { + if n := len(lis.ListenerFilters); n != 0 { + return nil, fmt.Errorf("unsupported field 'listener_filters' contains %d entries", n) + } + if useOrigDst := lis.GetUseOriginalDst(); useOrigDst != nil && useOrigDst.GetValue() { + return nil, errors.New("unsupported field 'use_original_dst' is present and set to true") + } + addr := lis.GetAddress() + if addr == nil { + return nil, fmt.Errorf("no address field in LDS response: %+v", lis) + } + sockAddr := addr.GetSocketAddress() + if sockAddr == nil { + return nil, fmt.Errorf("no socket_address field in LDS response: %+v", lis) + } + lu := &ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: sockAddr.GetAddress(), + Port: strconv.Itoa(int(sockAddr.GetPortValue())), + }, + } + + fcMgr, err := 
NewFilterChainManager(lis) + if err != nil { + return nil, err + } + lu.InboundListenerCfg.FilterChains = fcMgr + return lu, nil +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go new file mode 100644 index 000000000000..2dfeb5965b72 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_lds_test.go @@ -0,0 +1,1872 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "fmt" + "strings" + "testing" + "time" + + v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2" + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/testutils/xds/e2e" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/durationpb" + + v1udpaudpatypepb "github.com/cncf/udpa/go/udpa/type/v1" + v3xdsxdstypepb "github.com/cncf/xds/go/xds/type/v3" + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + rpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + anypb "github.com/golang/protobuf/ptypes/any" + spb "github.com/golang/protobuf/ptypes/struct" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + + _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC HTTP filter. 
+ _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. +) + +func (s) TestUnmarshalListener_ClientSide(t *testing.T) { + const ( + v3LDSTarget = "lds.target.good:3333" + v3RouteConfigName = "v3RouteConfig" + routeName = "routeName" + ) + + var ( + customFilter = &v3httppb.HttpFilter{ + Name: "customFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, + } + oldTypedStructFilter = &v3httppb.HttpFilter{ + Name: "customFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: wrappedCustomFilterOldTypedStructConfig}, + } + newTypedStructFilter = &v3httppb.HttpFilter{ + Name: "customFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: wrappedCustomFilterNewTypedStructConfig}, + } + customOptionalFilter = &v3httppb.HttpFilter{ + Name: "customFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, + IsOptional: true, + } + customFilter2 = &v3httppb.HttpFilter{ + Name: "customFilter2", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: customFilterConfig}, + } + errFilter = &v3httppb.HttpFilter{ + Name: "errFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: errFilterConfig}, + } + errOptionalFilter = &v3httppb.HttpFilter{ + Name: "errFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: errFilterConfig}, + IsOptional: true, + } + clientOnlyCustomFilter = &v3httppb.HttpFilter{ + Name: "clientOnlyCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: clientOnlyCustomFilterConfig}, + } + serverOnlyCustomFilter = &v3httppb.HttpFilter{ + Name: "serverOnlyCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig}, + } + serverOnlyOptionalCustomFilter = &v3httppb.HttpFilter{ + Name: "serverOnlyOptionalCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig}, + IsOptional: true, + } + 
unknownFilter = &v3httppb.HttpFilter{ + Name: "unknownFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: unknownFilterConfig}, + } + unknownOptionalFilter = &v3httppb.HttpFilter{ + Name: "unknownFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: unknownFilterConfig}, + IsOptional: true, + } + v3LisWithInlineRoute = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{v3LDSTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }}}}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(time.Second), + }, + }), + }, + }) + v3LisWithFilters = func(fs ...*v3httppb.HttpFilter) *anypb.Any { + fs = append(fs, emptyRouterFilter) + return testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny( + &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: v3RouteConfigName, + }, + }, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(time.Second), + }, + HttpFilters: fs, + }), + }, + }) + } + v3LisToTestRBAC = func(xffNumTrustedHops uint32, 
originalIpDetectionExtensions []*v3corepb.TypedExtensionConfig) *anypb.Any { + return testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny( + &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: v3RouteConfigName, + }, + }, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(time.Second), + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + XffNumTrustedHops: xffNumTrustedHops, + OriginalIpDetectionExtensions: originalIpDetectionExtensions, + }), + }, + }) + } + + v3ListenerWithCDSConfigSourceSelf = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny( + &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Self{}, + }, + RouteConfigName: v3RouteConfigName, + }, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }) + ) + + tests := []struct { + name string + resource *anypb.Any + wantName string + wantUpdate ListenerUpdate + wantErr bool + }{ + { + name: "non-listener resource", + resource: &anypb.Any{TypeUrl: version.V3HTTPConnManagerURL}, + wantErr: true, + }, + { + name: "badly marshaled listener resource", + resource: &anypb.Any{ + TypeUrl: version.V3ListenerURL, + Value: func() []byte { + lis := &v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: &anypb.Any{ + TypeUrl: version.V3HTTPConnManagerURL, + Value: []byte{1, 2, 3, 4}, + }, + }, + } + mLis, _ := proto.Marshal(lis) + return mLis + }(), + }, + 
wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "wrong type in apiListener", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v2xdspb.Listener{}), + }, + }), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "empty httpConnMgr in apiListener", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{}, + }, + }), + }, + }), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "scopedRoutes routeConfig in apiListener", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_ScopedRoutes{}, + }), + }, + }), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "rds.ConfigSource in apiListener is Self", + resource: v3ListenerWithCDSConfigSourceSelf, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + HTTPFilters: []HTTPFilter{routerFilter}, + Raw: v3ListenerWithCDSConfigSourceSelf, + }, + }, + { + name: "rds.ConfigSource in apiListener is not ADS or Self", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Path{ + Path: "/some/path", + }, + }, + RouteConfigName: v3RouteConfigName, + }, + }, + }), + }, + }), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "v3 with no filters", + resource: 
v3LisWithFilters(), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: routerFilterList, + Raw: v3LisWithFilters(), + }, + }, + { + name: "v3 no terminal filter", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ApiListener: &v3listenerpb.ApiListener{ + ApiListener: testutils.MarshalAny( + &v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_Rds{ + Rds: &v3httppb.Rds{ + ConfigSource: &v3corepb.ConfigSource{ + ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{Ads: &v3corepb.AggregatedConfigSource{}}, + }, + RouteConfigName: v3RouteConfigName, + }, + }, + CommonHttpProtocolOptions: &v3corepb.HttpProtocolOptions{ + MaxStreamDuration: durationpb.New(time.Second), + }, + }), + }, + }), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "v3 with custom filter", + resource: v3LisWithFilters(customFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterConfig}, + }, + routerFilter, + }, + Raw: v3LisWithFilters(customFilter), + }, + }, + { + name: "v3 with custom filter in old typed struct", + resource: v3LisWithFilters(oldTypedStructFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterOldTypedStructConfig}, + }, + routerFilter, + }, + Raw: v3LisWithFilters(oldTypedStructFilter), + }, + }, + { + name: "v3 with custom filter in new typed struct", + resource: v3LisWithFilters(newTypedStructFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + 
HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterNewTypedStructConfig}, + }, + routerFilter, + }, + Raw: v3LisWithFilters(newTypedStructFilter), + }, + }, + { + name: "v3 with optional custom filter", + resource: v3LisWithFilters(customOptionalFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{ + { + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterConfig}, + }, + routerFilter, + }, + Raw: v3LisWithFilters(customOptionalFilter), + }, + }, + { + name: "v3 with two filters with same name", + resource: v3LisWithFilters(customFilter, customFilter), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "v3 with two filters - same type different name", + resource: v3LisWithFilters(customFilter, customFilter2), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{{ + Name: "customFilter", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterConfig}, + }, { + Name: "customFilter2", + Filter: httpFilter{}, + Config: filterConfig{Cfg: customFilterConfig}, + }, + routerFilter, + }, + Raw: v3LisWithFilters(customFilter, customFilter2), + }, + }, + { + name: "v3 with server-only filter", + resource: v3LisWithFilters(serverOnlyCustomFilter), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "v3 with optional server-only filter", + resource: v3LisWithFilters(serverOnlyOptionalCustomFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + Raw: v3LisWithFilters(serverOnlyOptionalCustomFilter), + HTTPFilters: routerFilterList, + }, + }, + { + name: "v3 with client-only filter", + resource: v3LisWithFilters(clientOnlyCustomFilter), + wantName: v3LDSTarget, + 
wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{ + { + Name: "clientOnlyCustomFilter", + Filter: clientOnlyHTTPFilter{}, + Config: filterConfig{Cfg: clientOnlyCustomFilterConfig}, + }, + routerFilter}, + Raw: v3LisWithFilters(clientOnlyCustomFilter), + }, + }, + { + name: "v3 with err filter", + resource: v3LisWithFilters(errFilter), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "v3 with optional err filter", + resource: v3LisWithFilters(errOptionalFilter), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "v3 with unknown filter", + resource: v3LisWithFilters(unknownFilter), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "v3 with unknown filter (optional)", + resource: v3LisWithFilters(unknownOptionalFilter), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: routerFilterList, + Raw: v3LisWithFilters(unknownOptionalFilter), + }, + }, + { + name: "v3 listener resource", + resource: v3LisWithFilters(), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: routerFilterList, + Raw: v3LisWithFilters(), + }, + }, + { + name: "v3 listener resource wrapped", + resource: testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3LisWithFilters()}), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: routerFilterList, + Raw: v3LisWithFilters(), + }, + }, + // "To allow equating RBAC's direct_remote_ip and + // remote_ip...HttpConnectionManager.xff_num_trusted_hops must be unset + // or zero and HttpConnectionManager.original_ip_detection_extensions + // must be empty." 
- A41 + { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-valid", + resource: v3LisToTestRBAC(0, nil), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + RouteConfigName: v3RouteConfigName, + MaxStreamDuration: time.Second, + HTTPFilters: []HTTPFilter{routerFilter}, + Raw: v3LisToTestRBAC(0, nil), + }, + }, + // In order to support xDS Configured RBAC HTTPFilter equating direct + // remote ip and remote ip, xffNumTrustedHops cannot be greater than + // zero. This is because if you can trust a ingress proxy hop when + // determining an origin clients ip address, direct remote ip != remote + // ip. + { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-num-untrusted-hops", + resource: v3LisToTestRBAC(1, nil), + wantName: v3LDSTarget, + wantErr: true, + }, + // In order to support xDS Configured RBAC HTTPFilter equating direct + // remote ip and remote ip, originalIpDetectionExtensions must be empty. + // This is because if you have to ask ip-detection-extension for the + // original ip, direct remote ip might not equal remote ip. 
+ { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-original-ip-detection-extension", + resource: v3LisToTestRBAC(0, []*v3corepb.TypedExtensionConfig{{Name: "something"}}), + wantName: v3LDSTarget, + wantErr: true, + }, + { + name: "v3 listener with inline route configuration", + resource: v3LisWithInlineRoute, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InlineRouteConfig: &RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{v3LDSTarget}, + Routes: []*Route{{Prefix: newStringP("/"), WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, ActionType: RouteActionRoute}}, + }}}, + MaxStreamDuration: time.Second, + Raw: v3LisWithInlineRoute, + HTTPFilters: routerFilterList, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + name, update, err := unmarshalListenerResource(test.resource) + if (err != nil) != test.wantErr { + t.Errorf("unmarshalListenerResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) + } + if name != test.wantName { + t.Errorf("unmarshalListenerResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) + } + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { + t.Errorf("unmarshalListenerResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) + } + }) + } +} + +func (s) TestUnmarshalListener_ServerSide(t *testing.T) { + oldRBAC := envconfig.XDSRBAC + envconfig.XDSRBAC = true + defer func() { + envconfig.XDSRBAC = oldRBAC + }() + const ( + v3LDSTarget = "grpc/server?xds.resource.listening_address=0.0.0.0:9999" + testVersion = "test-version-lds-server" + ) + + var ( + serverOnlyCustomFilter = &v3httppb.HttpFilter{ + Name: "serverOnlyCustomFilter", + ConfigType: &v3httppb.HttpFilter_TypedConfig{TypedConfig: serverOnlyCustomFilterConfig}, + } + routeConfig = &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: 
[]*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}} + inlineRouteConfig = &RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*Route{{Prefix: newStringP("/"), ActionType: RouteActionNonForwardingAction}}, + }}} + emptyValidNetworkFilters = []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + }), + }, + }, + } + localSocketAddress = &v3corepb.Address{ + Address: &v3corepb.Address_SocketAddress{ + SocketAddress: &v3corepb.SocketAddress{ + Address: "0.0.0.0", + PortSpecifier: &v3corepb.SocketAddress_PortValue{ + PortValue: 9999, + }, + }, + }, + } + listenerEmptyTransportSocket = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + }, + }, + }) + listenerNoValidationContextDeprecatedFields = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + 
CertificateName: "identityCertName", + }, + }, + }), + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Name: "default-filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + }, + }), + }, + }, + }, + }) + listenerNoValidationContextNewFields = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Name: "default-filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + }, + }), + }, + }, + }, + }) + 
listenerWithValidationContextDeprecatedFields = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + }, + }, + }), + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Name: "default-filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextCertificateProviderInstance{ + ValidationContextCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "defaultRootPluginInstance", + CertificateName: "defaultRootCertName", + 
}, + }, + }, + }), + }, + }, + }, + }) + listenerWithValidationContextNewFields = testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContext{ + ValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "rootPluginInstance", + CertificateName: "rootCertName", + }, + }, + }, + }, + }), + }, + }, + }, + }, + DefaultFilterChain: &v3listenerpb.FilterChain{ + Name: "default-filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "defaultIdentityPluginInstance", + CertificateName: "defaultIdentityCertName", + }, + ValidationContextType: &v3tlspb.CommonTlsContext_CombinedValidationContext{ + CombinedValidationContext: &v3tlspb.CommonTlsContext_CombinedCertificateValidationContext{ + DefaultValidationContext: &v3tlspb.CertificateValidationContext{ + CaCertificateProviderInstance: 
&v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "defaultRootPluginInstance", + CertificateName: "defaultRootCertName", + }, + }, + }, + }, + }, + }), + }, + }, + }, + }) + ) + v3LisToTestRBAC := func(xffNumTrustedHops uint32, originalIpDetectionExtensions []*v3corepb.TypedExtensionConfig) *anypb.Any { + return testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + XffNumTrustedHops: xffNumTrustedHops, + OriginalIpDetectionExtensions: originalIpDetectionExtensions, + }), + }, + }, + }, + }, + }, + }) + } + v3LisWithBadRBACConfiguration := func(rbacCfg *v3rbacpb.RBAC) *anypb.Any { + return testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.HTTPFilter("rbac", rbacCfg), e2e.RouterHTTPFilter}, + }), + }, + }, + }, + }, + }, + }) + } + badRBACCfgRegex := &v3rbacpb.RBAC{ + Rules: &rpb.RBAC{ + Action: rpb.RBAC_ALLOW, + Policies: map[string]*rpb.Policy{ + "bad-regex-value": { + Permissions: []*rpb.Permission{ + {Rule: &rpb.Permission_Any{Any: true}}, + }, + Principals: []*rpb.Principal{ + {Identifier: &rpb.Principal_Header{Header: &v3routepb.HeaderMatcher{Name: ":method", 
HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: "["}}}}}, + }, + }, + }, + }, + } + badRBACCfgDestIP := &v3rbacpb.RBAC{ + Rules: &rpb.RBAC{ + Action: rpb.RBAC_ALLOW, + Policies: map[string]*rpb.Policy{ + "certain-destination-ip": { + Permissions: []*rpb.Permission{ + {Rule: &rpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*rpb.Principal{ + {Identifier: &rpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + } + + tests := []struct { + name string + resource *anypb.Any + wantName string + wantUpdate ListenerUpdate + wantErr string + }{ + { + name: "non-empty listener filters", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + ListenerFilters: []*v3listenerpb.ListenerFilter{ + {Name: "listener-filter-1"}, + }, + }), + wantName: v3LDSTarget, + wantErr: "unsupported field 'listener_filters'", + }, + { + name: "use_original_dst is set", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + UseOriginalDst: &wrapperspb.BoolValue{Value: true}, + }), + wantName: v3LDSTarget, + wantErr: "unsupported field 'use_original_dst'", + }, + { + name: "no address field", + resource: testutils.MarshalAny(&v3listenerpb.Listener{Name: v3LDSTarget}), + wantName: v3LDSTarget, + wantErr: "no address field in LDS response", + }, + { + name: "no socket address field", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: &v3corepb.Address{}, + }), + wantName: v3LDSTarget, + wantErr: "no socket_address field in LDS response", + }, + { + name: "no filter chains and no default filter chain", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{DestinationPort: 
&wrapperspb.UInt32Value{Value: 666}}, + Filters: emptyValidNetworkFilters, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "no supported filter chains and no default filter chain", + }, + { + name: "missing http connection manager network filter", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "missing HttpConnectionManager filter", + }, + { + name: "missing filter name in http filter", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{}), + }, + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "missing name field in filter", + }, + { + name: "duplicate filter names in http filter", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter}, + }), + }, + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "duplicate filter name", + 
}, + { + name: "no terminal filter", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + }), + }, + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "http filters list is empty", + }, + { + name: "terminal filter not last", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{emptyRouterFilter, serverOnlyCustomFilter}, + }), + }, + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "is a terminal filter but it is not last in the filter chain", + }, + { + name: "last not terminal filter", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: routeConfig, + }, + HttpFilters: []*v3httppb.HttpFilter{serverOnlyCustomFilter}, + }), + }, + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "is not a terminal filter", + }, + { + name: "unsupported 
oneof in typed config of http filter", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_ConfigDiscovery{}, + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "unsupported config_type", + }, + { + name: "overlapping filter chain match criteria", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{1, 2, 3, 4, 5}}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{}, + Filters: emptyValidNetworkFilters, + }, + { + FilterChainMatch: &v3listenerpb.FilterChainMatch{SourcePorts: []uint32{5, 6, 7}}, + Filters: emptyValidNetworkFilters, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "multiple filter chains with overlapping matching rules are defined", + }, + { + name: "unsupported network filter", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.LocalReplyConfig{}), + }, + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "unsupported network filter", + }, + { + name: "badly marshaled network filter", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: []*v3listenerpb.Filter{ + { + Name: "name", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: &anypb.Any{ + TypeUrl: 
version.V3HTTPConnManagerURL, + Value: []byte{1, 2, 3, 4}, + }, + }, + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "failed unmarshaling of network filter", + }, + { + name: "unexpected transport socket name", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "unsupported-transport-socket-name", + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "transport_socket field has unexpected name", + }, + { + name: "unexpected transport socket typedConfig URL", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.UpstreamTlsContext{}), + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "transport_socket field has unexpected typeURL", + }, + { + name: "badly marshaled transport socket", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: &anypb.Any{ + TypeUrl: version.V3DownstreamTLSContextURL, + Value: []byte{1, 2, 3, 4}, + }, + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "failed to unmarshal DownstreamTlsContext in LDS response", + }, + { + name: "missing CommonTlsContext", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: 
v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{}), + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "DownstreamTlsContext in LDS response does not contain a CommonTlsContext", + }, + { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-valid", + resource: v3LisToTestRBAC(0, nil), + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Raw: listenerEmptyTransportSocket, + }, + }, + { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-num-untrusted-hops", + resource: v3LisToTestRBAC(1, nil), + wantName: v3LDSTarget, + wantErr: "xff_num_trusted_hops must be unset or zero", + }, + { + name: "rbac-allow-equating-direct-remote-ip-and-remote-ip-invalid-original-ip-detection-extension", + resource: v3LisToTestRBAC(0, []*v3corepb.TypedExtensionConfig{{Name: "something"}}), + wantName: v3LDSTarget, + wantErr: "original_ip_detection_extensions must be empty", + }, + { + name: "rbac-with-invalid-regex", + resource: v3LisWithBadRBACConfiguration(badRBACCfgRegex), + wantName: v3LDSTarget, + wantErr: "error parsing config for filter", + }, + { + name: "rbac-with-invalid-destination-ip-matcher", + resource: v3LisWithBadRBACConfiguration(badRBACCfgDestIP), + wantName: 
v3LDSTarget, + wantErr: "error parsing config for filter", + }, + { + name: "unsupported validation context in transport socket", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + ValidationContextType: &v3tlspb.CommonTlsContext_ValidationContextSdsSecretConfig{ + ValidationContextSdsSecretConfig: &v3tlspb.SdsSecretConfig{ + Name: "foo-sds-secret", + }, + }, + }, + }), + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "validation context contains unexpected type", + }, + { + name: "empty transport socket", + resource: listenerEmptyTransportSocket, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Raw: listenerEmptyTransportSocket, + }, + }, + { + name: "no identity and root certificate providers using deprecated fields", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: 
&v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", + }, + { + name: "no identity and root certificate providers using new fields", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + RequireClientCertificate: &wrapperspb.BoolValue{Value: true}, + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateProviderInstance: &v3tlspb.CertificateProviderPluginInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "security configuration on the server-side does not contain root certificate provider instance name, but require_client_cert field is set", + }, + { + name: "no identity certificate provider with require_client_cert", + resource: testutils.MarshalAny(&v3listenerpb.Listener{ + Name: v3LDSTarget, + Address: localSocketAddress, + FilterChains: []*v3listenerpb.FilterChain{ + { + Name: "filter-chain-1", + Filters: emptyValidNetworkFilters, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + 
ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{}, + }), + }, + }, + }, + }, + }), + wantName: v3LDSTarget, + wantErr: "security configuration on the server-side does not contain identity certificate provider instance name", + }, + { + name: "happy case with no validation context using deprecated fields", + resource: listenerNoValidationContextDeprecatedFields, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + Raw: listenerNoValidationContextDeprecatedFields, + }, + }, + { + name: "happy case with no validation context using new fields", + resource: listenerNoValidationContextNewFields, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + 
IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + Raw: listenerNoValidationContextNewFields, + }, + }, + { + name: "happy case with validation context provider instance with deprecated fields", + resource: listenerWithValidationContextDeprecatedFields, + wantName: v3LDSTarget, + wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + RootInstanceName: "rootPluginInstance", + RootCertName: "rootCertName", + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + RequireClientCert: true, + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + RootInstanceName: "defaultRootPluginInstance", + RootCertName: "defaultRootCertName", + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + RequireClientCert: true, + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + Raw: listenerWithValidationContextDeprecatedFields, + }, + }, + { + name: "happy case with validation context provider instance with new fields", + resource: listenerWithValidationContextNewFields, + wantName: v3LDSTarget, 
+ wantUpdate: ListenerUpdate{ + InboundListenerCfg: &InboundListenerConfig{ + Address: "0.0.0.0", + Port: "9999", + FilterChains: &FilterChainManager{ + dstPrefixMap: map[string]*destPrefixEntry{ + unspecifiedPrefixMapKey: { + srcTypeArr: [3]*sourcePrefixes{ + { + srcPrefixMap: map[string]*sourcePrefixEntry{ + unspecifiedPrefixMapKey: { + srcPortMap: map[int]*FilterChain{ + 0: { + SecurityCfg: &SecurityConfig{ + RootInstanceName: "rootPluginInstance", + RootCertName: "rootCertName", + IdentityInstanceName: "identityPluginInstance", + IdentityCertName: "identityCertName", + RequireClientCert: true, + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + }, + }, + }, + }, + }, + def: &FilterChain{ + SecurityCfg: &SecurityConfig{ + RootInstanceName: "defaultRootPluginInstance", + RootCertName: "defaultRootCertName", + IdentityInstanceName: "defaultIdentityPluginInstance", + IdentityCertName: "defaultIdentityCertName", + RequireClientCert: true, + }, + InlineRouteConfig: inlineRouteConfig, + HTTPFilters: routerFilterList, + }, + }, + }, + Raw: listenerWithValidationContextNewFields, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + name, update, err := unmarshalListenerResource(test.resource) + if err != nil && !strings.Contains(err.Error(), test.wantErr) { + t.Errorf("unmarshalListenerResource(%s) = %v wantErr: %q", pretty.ToJSON(test.resource), err, test.wantErr) + } + if name != test.wantName { + t.Errorf("unmarshalListenerResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) + } + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { + t.Errorf("unmarshalListenerResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) + } + }) + } +} + +type filterConfig struct { + httpfilter.FilterConfig + Cfg proto.Message + Override proto.Message +} + +// httpFilter allows testing the http filter registry and 
parsing functionality. +type httpFilter struct { + httpfilter.ClientInterceptorBuilder + httpfilter.ServerInterceptorBuilder +} + +func (httpFilter) TypeURLs() []string { return []string{"custom.filter"} } + +func (httpFilter) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + return filterConfig{Cfg: cfg}, nil +} + +func (httpFilter) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + return filterConfig{Override: override}, nil +} + +func (httpFilter) IsTerminal() bool { + return false +} + +// errHTTPFilter returns errors no matter what is passed to ParseFilterConfig. +type errHTTPFilter struct { + httpfilter.ClientInterceptorBuilder +} + +func (errHTTPFilter) TypeURLs() []string { return []string{"err.custom.filter"} } + +func (errHTTPFilter) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + return nil, fmt.Errorf("error from ParseFilterConfig") +} + +func (errHTTPFilter) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + return nil, fmt.Errorf("error from ParseFilterConfigOverride") +} + +func (errHTTPFilter) IsTerminal() bool { + return false +} + +func init() { + httpfilter.Register(httpFilter{}) + httpfilter.Register(errHTTPFilter{}) + httpfilter.Register(serverOnlyHTTPFilter{}) + httpfilter.Register(clientOnlyHTTPFilter{}) +} + +// serverOnlyHTTPFilter does not implement ClientInterceptorBuilder +type serverOnlyHTTPFilter struct { + httpfilter.ServerInterceptorBuilder +} + +func (serverOnlyHTTPFilter) TypeURLs() []string { return []string{"serverOnly.custom.filter"} } + +func (serverOnlyHTTPFilter) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + return filterConfig{Cfg: cfg}, nil +} + +func (serverOnlyHTTPFilter) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + return filterConfig{Override: override}, nil +} + +func (serverOnlyHTTPFilter) IsTerminal() bool { + return false +} + 
+// clientOnlyHTTPFilter does not implement ServerInterceptorBuilder +type clientOnlyHTTPFilter struct { + httpfilter.ClientInterceptorBuilder +} + +func (clientOnlyHTTPFilter) TypeURLs() []string { return []string{"clientOnly.custom.filter"} } + +func (clientOnlyHTTPFilter) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { + return filterConfig{Cfg: cfg}, nil +} + +func (clientOnlyHTTPFilter) ParseFilterConfigOverride(override proto.Message) (httpfilter.FilterConfig, error) { + return filterConfig{Override: override}, nil +} + +func (clientOnlyHTTPFilter) IsTerminal() bool { + return false +} + +var customFilterConfig = &anypb.Any{ + TypeUrl: "custom.filter", + Value: []byte{1, 2, 3}, +} + +var errFilterConfig = &anypb.Any{ + TypeUrl: "err.custom.filter", + Value: []byte{1, 2, 3}, +} + +var serverOnlyCustomFilterConfig = &anypb.Any{ + TypeUrl: "serverOnly.custom.filter", + Value: []byte{1, 2, 3}, +} + +var clientOnlyCustomFilterConfig = &anypb.Any{ + TypeUrl: "clientOnly.custom.filter", + Value: []byte{1, 2, 3}, +} + +// This custom filter uses the old TypedStruct message from the cncf/udpa repo. +var customFilterOldTypedStructConfig = &v1udpaudpatypepb.TypedStruct{ + TypeUrl: "custom.filter", + Value: &spb.Struct{ + Fields: map[string]*spb.Value{ + "foo": {Kind: &spb.Value_StringValue{StringValue: "bar"}}, + }, + }, +} +var wrappedCustomFilterOldTypedStructConfig *anypb.Any + +// This custom filter uses the new TypedStruct message from the cncf/xds repo. 
+var customFilterNewTypedStructConfig = &v3xdsxdstypepb.TypedStruct{ + TypeUrl: "custom.filter", + Value: &spb.Struct{ + Fields: map[string]*spb.Value{ + "foo": {Kind: &spb.Value_StringValue{StringValue: "bar"}}, + }, + }, +} +var wrappedCustomFilterNewTypedStructConfig *anypb.Any + +func init() { + wrappedCustomFilterOldTypedStructConfig = testutils.MarshalAny(customFilterOldTypedStructConfig) + wrappedCustomFilterNewTypedStructConfig = testutils.MarshalAny(customFilterNewTypedStructConfig) +} + +var unknownFilterConfig = &anypb.Any{ + TypeUrl: "unknown.custom.filter", + Value: []byte{1, 2, 3}, +} + +func wrappedOptionalFilter(name string) *anypb.Any { + return testutils.MarshalAny(&v3routepb.FilterConfig{ + IsOptional: true, + Config: &anypb.Any{ + TypeUrl: name, + Value: []byte{1, 2, 3}, + }, + }) +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go new file mode 100644 index 000000000000..c51a0c24b508 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds.go @@ -0,0 +1,448 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xdsresource + +import ( + "fmt" + "math" + "regexp" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/protobuf/types/known/anypb" + + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" +) + +func unmarshalRouteConfigResource(r *anypb.Any) (string, RouteConfigUpdate, error) { + r, err := UnwrapResource(r) + if err != nil { + return "", RouteConfigUpdate{}, fmt.Errorf("failed to unwrap resource: %v", err) + } + + if !IsRouteConfigResource(r.GetTypeUrl()) { + return "", RouteConfigUpdate{}, fmt.Errorf("unexpected resource type: %q ", r.GetTypeUrl()) + } + rc := &v3routepb.RouteConfiguration{} + if err := proto.Unmarshal(r.GetValue(), rc); err != nil { + return "", RouteConfigUpdate{}, fmt.Errorf("failed to unmarshal resource: %v", err) + } + + u, err := generateRDSUpdateFromRouteConfiguration(rc) + if err != nil { + return rc.GetName(), RouteConfigUpdate{}, err + } + u.Raw = r + return rc.GetName(), u, nil +} + +// generateRDSUpdateFromRouteConfiguration checks if the provided +// RouteConfiguration meets the expected criteria. If so, it returns a +// RouteConfigUpdate with nil error. +// +// A RouteConfiguration resource is considered valid when only if it contains a +// VirtualHost whose domain field matches the server name from the URI passed +// to the gRPC channel, and it contains a clusterName or a weighted cluster. +// +// The RouteConfiguration includes a list of virtualHosts, which may have zero +// or more elements. We are interested in the element whose domains field +// matches the server name specified in the "xds:" URI. 
The only field in the +// VirtualHost proto that the we are interested in is the list of routes. We +// only look at the last route in the list (the default route), whose match +// field must be empty and whose route field must be set. Inside that route +// message, the cluster field will contain the clusterName or weighted clusters +// we are looking for. +func generateRDSUpdateFromRouteConfiguration(rc *v3routepb.RouteConfiguration) (RouteConfigUpdate, error) { + vhs := make([]*VirtualHost, 0, len(rc.GetVirtualHosts())) + csps := make(map[string]clusterspecifier.BalancerConfig) + if envconfig.XDSRLS { + var err error + csps, err = processClusterSpecifierPlugins(rc.ClusterSpecifierPlugins) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + } + // cspNames represents all the cluster specifiers referenced by Route + // Actions - any cluster specifiers not referenced by a Route Action can be + // ignored and not emitted by the xdsclient. + var cspNames = make(map[string]bool) + for _, vh := range rc.GetVirtualHosts() { + routes, cspNs, err := routesProtoToSlice(vh.Routes, csps) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + for n := range cspNs { + cspNames[n] = true + } + rc, err := generateRetryConfig(vh.GetRetryPolicy()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("received route is invalid: %v", err) + } + vhOut := &VirtualHost{ + Domains: vh.GetDomains(), + Routes: routes, + RetryConfig: rc, + } + cfgs, err := processHTTPFilterOverrides(vh.GetTypedPerFilterConfig()) + if err != nil { + return RouteConfigUpdate{}, fmt.Errorf("virtual host %+v: %v", vh, err) + } + vhOut.HTTPFilterConfigOverride = cfgs + vhs = append(vhs, vhOut) + } + + // "For any entry in the RouteConfiguration.cluster_specifier_plugins not + // referenced by an enclosed ActionType's cluster_specifier_plugin, the xDS + // client should not provide it to its consumers." 
- RLS in xDS Design + for name := range csps { + if !cspNames[name] { + delete(csps, name) + } + } + + return RouteConfigUpdate{VirtualHosts: vhs, ClusterSpecifierPlugins: csps}, nil +} + +func processClusterSpecifierPlugins(csps []*v3routepb.ClusterSpecifierPlugin) (map[string]clusterspecifier.BalancerConfig, error) { + cspCfgs := make(map[string]clusterspecifier.BalancerConfig) + // "The xDS client will inspect all elements of the + // cluster_specifier_plugins field looking up a plugin based on the + // extension.typed_config of each." - RLS in xDS design + for _, csp := range csps { + cs := clusterspecifier.Get(csp.GetExtension().GetTypedConfig().GetTypeUrl()) + if cs == nil { + if csp.GetIsOptional() { + // "If a plugin is not supported but has is_optional set, then + // we will ignore any routes that point to that plugin" + cspCfgs[csp.GetExtension().GetName()] = nil + continue + } + // "If no plugin is registered for it, the resource will be NACKed." + // - RLS in xDS design + return nil, fmt.Errorf("cluster specifier %q of type %q was not found", csp.GetExtension().GetName(), csp.GetExtension().GetTypedConfig().GetTypeUrl()) + } + lbCfg, err := cs.ParseClusterSpecifierConfig(csp.GetExtension().GetTypedConfig()) + if err != nil { + // "If a plugin is found, the value of the typed_config field will + // be passed to it's conversion method, and if an error is + // encountered, the resource will be NACKED." - RLS in xDS design + return nil, fmt.Errorf("error: %q parsing config %q for cluster specifier %q of type %q", err, csp.GetExtension().GetTypedConfig(), csp.GetExtension().GetName(), csp.GetExtension().GetTypedConfig().GetTypeUrl()) + } + // "If all cluster specifiers are valid, the xDS client will store the + // configurations in a map keyed by the name of the extension instance." 
- + // RLS in xDS Design + cspCfgs[csp.GetExtension().GetName()] = lbCfg + } + return cspCfgs, nil +} + +func generateRetryConfig(rp *v3routepb.RetryPolicy) (*RetryConfig, error) { + if rp == nil { + return nil, nil + } + + cfg := &RetryConfig{RetryOn: make(map[codes.Code]bool)} + for _, s := range strings.Split(rp.GetRetryOn(), ",") { + switch strings.TrimSpace(strings.ToLower(s)) { + case "cancelled": + cfg.RetryOn[codes.Canceled] = true + case "deadline-exceeded": + cfg.RetryOn[codes.DeadlineExceeded] = true + case "internal": + cfg.RetryOn[codes.Internal] = true + case "resource-exhausted": + cfg.RetryOn[codes.ResourceExhausted] = true + case "unavailable": + cfg.RetryOn[codes.Unavailable] = true + } + } + + if rp.NumRetries == nil { + cfg.NumRetries = 1 + } else { + cfg.NumRetries = rp.GetNumRetries().Value + if cfg.NumRetries < 1 { + return nil, fmt.Errorf("retry_policy.num_retries = %v; must be >= 1", cfg.NumRetries) + } + } + + backoff := rp.GetRetryBackOff() + if backoff == nil { + cfg.RetryBackoff.BaseInterval = 25 * time.Millisecond + } else { + cfg.RetryBackoff.BaseInterval = backoff.GetBaseInterval().AsDuration() + if cfg.RetryBackoff.BaseInterval <= 0 { + return nil, fmt.Errorf("retry_policy.base_interval = %v; must be > 0", cfg.RetryBackoff.BaseInterval) + } + } + if max := backoff.GetMaxInterval(); max == nil { + cfg.RetryBackoff.MaxInterval = 10 * cfg.RetryBackoff.BaseInterval + } else { + cfg.RetryBackoff.MaxInterval = max.AsDuration() + if cfg.RetryBackoff.MaxInterval <= 0 { + return nil, fmt.Errorf("retry_policy.max_interval = %v; must be > 0", cfg.RetryBackoff.MaxInterval) + } + } + + if len(cfg.RetryOn) == 0 { + return &RetryConfig{}, nil + } + return cfg, nil +} + +func routesProtoToSlice(routes []*v3routepb.Route, csps map[string]clusterspecifier.BalancerConfig) ([]*Route, map[string]bool, error) { + var routesRet []*Route + var cspNames = make(map[string]bool) + for _, r := range routes { + match := r.GetMatch() + if match == nil { + return 
nil, nil, fmt.Errorf("route %+v doesn't have a match", r) + } + + if len(match.GetQueryParameters()) != 0 { + // Ignore route with query parameters. + logger.Warningf("Ignoring route %+v with query parameter matchers", r) + continue + } + + pathSp := match.GetPathSpecifier() + if pathSp == nil { + return nil, nil, fmt.Errorf("route %+v doesn't have a path specifier", r) + } + + var route Route + switch pt := pathSp.(type) { + case *v3routepb.RouteMatch_Prefix: + route.Prefix = &pt.Prefix + case *v3routepb.RouteMatch_Path: + route.Path = &pt.Path + case *v3routepb.RouteMatch_SafeRegex: + regex := pt.SafeRegex.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + route.Regex = re + default: + return nil, nil, fmt.Errorf("route %+v has an unrecognized path specifier: %+v", r, pt) + } + + if caseSensitive := match.GetCaseSensitive(); caseSensitive != nil { + route.CaseInsensitive = !caseSensitive.Value + } + + for _, h := range match.GetHeaders() { + var header HeaderMatcher + switch ht := h.GetHeaderMatchSpecifier().(type) { + case *v3routepb.HeaderMatcher_ExactMatch: + header.ExactMatch = &ht.ExactMatch + case *v3routepb.HeaderMatcher_SafeRegexMatch: + regex := ht.SafeRegexMatch.GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, nil, fmt.Errorf("route %+v contains an invalid regex %q", r, regex) + } + header.RegexMatch = re + case *v3routepb.HeaderMatcher_RangeMatch: + header.RangeMatch = &Int64Range{ + Start: ht.RangeMatch.Start, + End: ht.RangeMatch.End, + } + case *v3routepb.HeaderMatcher_PresentMatch: + header.PresentMatch = &ht.PresentMatch + case *v3routepb.HeaderMatcher_PrefixMatch: + header.PrefixMatch = &ht.PrefixMatch + case *v3routepb.HeaderMatcher_SuffixMatch: + header.SuffixMatch = &ht.SuffixMatch + case *v3routepb.HeaderMatcher_StringMatch: + sm, err := matcher.StringMatcherFromProto(ht.StringMatch) + if err != nil { + return nil, 
nil, fmt.Errorf("route %+v has an invalid string matcher: %v", err, ht.StringMatch) + } + header.StringMatch = &sm + default: + return nil, nil, fmt.Errorf("route %+v has an unrecognized header matcher: %+v", r, ht) + } + header.Name = h.GetName() + invert := h.GetInvertMatch() + header.InvertMatch = &invert + route.Headers = append(route.Headers, &header) + } + + if fr := match.GetRuntimeFraction(); fr != nil { + d := fr.GetDefaultValue() + n := d.GetNumerator() + switch d.GetDenominator() { + case v3typepb.FractionalPercent_HUNDRED: + n *= 10000 + case v3typepb.FractionalPercent_TEN_THOUSAND: + n *= 100 + case v3typepb.FractionalPercent_MILLION: + } + route.Fraction = &n + } + + switch r.GetAction().(type) { + case *v3routepb.Route_Route: + route.WeightedClusters = make(map[string]WeightedCluster) + action := r.GetRoute() + + // Hash Policies are only applicable for a Ring Hash LB. + if envconfig.XDSRingHash { + hp, err := hashPoliciesProtoToSlice(action.HashPolicy) + if err != nil { + return nil, nil, err + } + route.HashPolicies = hp + } + + switch a := action.GetClusterSpecifier().(type) { + case *v3routepb.RouteAction_Cluster: + route.WeightedClusters[a.Cluster] = WeightedCluster{Weight: 1} + case *v3routepb.RouteAction_WeightedClusters: + wcs := a.WeightedClusters + var totalWeight uint64 + for _, c := range wcs.Clusters { + w := c.GetWeight().GetValue() + if w == 0 { + continue + } + totalWeight += uint64(w) + if totalWeight > math.MaxUint32 { + return nil, nil, fmt.Errorf("xds: total weight of clusters exceeds MaxUint32") + } + wc := WeightedCluster{Weight: w} + cfgs, err := processHTTPFilterOverrides(c.GetTypedPerFilterConfig()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, a, err) + } + wc.HTTPFilterConfigOverride = cfgs + route.WeightedClusters[c.GetName()] = wc + } + if totalWeight == 0 { + return nil, nil, fmt.Errorf("route %+v, action %+v, has no valid cluster in WeightedCluster action", r, a) + } + case 
*v3routepb.RouteAction_ClusterSpecifierPlugin: + // gRFC A28 was updated to say the following: + // + // The route’s action field must be route, and its + // cluster_specifier: + // - Can be Cluster + // - Can be Weighted_clusters + // - Can be unset or an unsupported field. The route containing + // this action will be ignored. + // + // This means that if this env var is not set, we should treat + // it as if it we didn't know about the cluster_specifier_plugin + // at all. + if !envconfig.XDSRLS { + logger.Warningf("Ignoring route %+v with unsupported route_action field: cluster_specifier_plugin", r) + continue + } + if _, ok := csps[a.ClusterSpecifierPlugin]; !ok { + // "When processing RouteActions, if any action includes a + // cluster_specifier_plugin value that is not in + // RouteConfiguration.cluster_specifier_plugins, the + // resource will be NACKed." - RLS in xDS design + return nil, nil, fmt.Errorf("route %+v, action %+v, specifies a cluster specifier plugin %+v that is not in Route Configuration", r, a, a.ClusterSpecifierPlugin) + } + if csps[a.ClusterSpecifierPlugin] == nil { + logger.Warningf("Ignoring route %+v with optional and unsupported cluster specifier plugin %+v", r, a.ClusterSpecifierPlugin) + continue + } + cspNames[a.ClusterSpecifierPlugin] = true + route.ClusterSpecifierPlugin = a.ClusterSpecifierPlugin + default: + logger.Warningf("Ignoring route %+v with unknown ClusterSpecifier %+v", r, a) + continue + } + + msd := action.GetMaxStreamDuration() + // Prefer grpc_timeout_header_max, if set. 
+ dur := msd.GetGrpcTimeoutHeaderMax() + if dur == nil { + dur = msd.GetMaxStreamDuration() + } + if dur != nil { + d := dur.AsDuration() + route.MaxStreamDuration = &d + } + + var err error + route.RetryConfig, err = generateRetryConfig(action.GetRetryPolicy()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v, action %+v: %v", r, action, err) + } + + route.ActionType = RouteActionRoute + + case *v3routepb.Route_NonForwardingAction: + // Expected to be used on server side. + route.ActionType = RouteActionNonForwardingAction + default: + route.ActionType = RouteActionUnsupported + } + + cfgs, err := processHTTPFilterOverrides(r.GetTypedPerFilterConfig()) + if err != nil { + return nil, nil, fmt.Errorf("route %+v: %v", r, err) + } + route.HTTPFilterConfigOverride = cfgs + routesRet = append(routesRet, &route) + } + return routesRet, cspNames, nil +} + +func hashPoliciesProtoToSlice(policies []*v3routepb.RouteAction_HashPolicy) ([]*HashPolicy, error) { + var hashPoliciesRet []*HashPolicy + for _, p := range policies { + policy := HashPolicy{Terminal: p.Terminal} + switch p.GetPolicySpecifier().(type) { + case *v3routepb.RouteAction_HashPolicy_Header_: + policy.HashPolicyType = HashPolicyTypeHeader + policy.HeaderName = p.GetHeader().GetHeaderName() + if rr := p.GetHeader().GetRegexRewrite(); rr != nil { + regex := rr.GetPattern().GetRegex() + re, err := regexp.Compile(regex) + if err != nil { + return nil, fmt.Errorf("hash policy %+v contains an invalid regex %q", p, regex) + } + policy.Regex = re + policy.RegexSubstitution = rr.GetSubstitution() + } + case *v3routepb.RouteAction_HashPolicy_FilterState_: + if p.GetFilterState().GetKey() != "io.grpc.channel_id" { + logger.Warningf("Ignoring hash policy %+v with invalid key for filter state policy %q", p, p.GetFilterState().GetKey()) + continue + } + policy.HashPolicyType = HashPolicyTypeChannelID + default: + logger.Warningf("Ignoring unsupported hash policy %T", p.GetPolicySpecifier()) + continue + } + + 
hashPoliciesRet = append(hashPoliciesRet, &policy) + } + return hashPoliciesRet, nil +} diff --git a/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go new file mode 100644 index 000000000000..fa10d2aa2694 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/unmarshal_rds_test.go @@ -0,0 +1,1654 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xdsresource + +import ( + "errors" + "fmt" + "math" + "regexp" + "testing" + "time" + + v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/testutils" + "google.golang.org/grpc/internal/xds/matcher" + "google.golang.org/grpc/xds/internal/clusterspecifier" + "google.golang.org/grpc/xds/internal/httpfilter" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version" + "google.golang.org/protobuf/types/known/durationpb" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + rpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" + v3routepb 
"github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3rbacpb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3" + v3matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3" + v3typepb "github.com/envoyproxy/go-control-plane/envoy/type/v3" + anypb "github.com/golang/protobuf/ptypes/any" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" +) + +func (s) TestRDSGenerateRDSUpdateFromRouteConfiguration(t *testing.T) { + const ( + uninterestingDomain = "uninteresting.domain" + uninterestingClusterName = "uninterestingClusterName" + ldsTarget = "lds.target.good:1111" + routeName = "routeName" + clusterName = "clusterName" + ) + + var ( + goodRouteConfigWithFilterConfigs = func(cfgs map[string]*anypb.Any) *v3routepb.RouteConfiguration { + return &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}}, + }, + }}, + TypedPerFilterConfig: cfgs, + }}, + } + } + goodRouteConfigWithClusterSpecifierPlugins = func(csps []*v3routepb.ClusterSpecifierPlugin, cspReferences []string) *v3routepb.RouteConfiguration { + var rs []*v3routepb.Route + + for i, cspReference := range cspReferences { + rs = append(rs, &v3routepb.Route{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: fmt.Sprint(i + 1)}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{ClusterSpecifierPlugin: cspReference}, + }, + }, + }) + } + + rc := &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + 
Domains: []string{ldsTarget}, + Routes: rs, + }}, + ClusterSpecifierPlugins: csps, + } + + return rc + } + goodRouteConfigWithClusterSpecifierPluginsAndNormalRoute = func(csps []*v3routepb.ClusterSpecifierPlugin, cspReferences []string) *v3routepb.RouteConfiguration { + rs := goodRouteConfigWithClusterSpecifierPlugins(csps, cspReferences) + rs.VirtualHosts[0].Routes = append(rs.VirtualHosts[0].Routes, &v3routepb.Route{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + CaseSensitive: &wrapperspb.BoolValue{Value: false}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }}}) + return rs + } + goodRouteConfigWithUnsupportedClusterSpecifier = &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + CaseSensitive: &wrapperspb.BoolValue{Value: false}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}}, + }}, + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "|"}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_ClusterHeader{}}, + }}, + }, + }, + }, + } + + goodUpdateWithFilterConfigs = func(cfgs map[string]httpfilter.FilterConfig) RouteConfigUpdate { + return RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + ActionType: RouteActionRoute, + }}, + HTTPFilterConfigOverride: cfgs, + }}, + } + } + goodUpdate = RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: 
nil, + }}, + } + goodUpdateWithNormalRoute = RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP("/"), + CaseInsensitive: true, + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + }, + } + goodUpdateWithClusterSpecifierPluginA = RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*Route{{ + Prefix: newStringP("1"), + ActionType: RouteActionRoute, + ClusterSpecifierPlugin: "cspA", + }}, + }}, + ClusterSpecifierPlugins: map[string]clusterspecifier.BalancerConfig{ + "cspA": nil, + }, + } + clusterSpecifierPlugin = func(name string, config *anypb.Any, isOptional bool) *v3routepb.ClusterSpecifierPlugin { + return &v3routepb.ClusterSpecifierPlugin{ + Extension: &v3corepb.TypedExtensionConfig{ + Name: name, + TypedConfig: config, + }, + IsOptional: isOptional, + } + } + goodRouteConfigWithRetryPolicy = func(vhrp *v3routepb.RetryPolicy, rrp *v3routepb.RetryPolicy) *v3routepb.RouteConfiguration { + return &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + RetryPolicy: rrp, + }, + }, + }}, + RetryPolicy: vhrp, + }}, + } + } + goodUpdateWithRetryPolicy = func(vhrc *RetryConfig, rrc *RetryConfig) RouteConfigUpdate { + return RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + ActionType: RouteActionRoute, + RetryConfig: rrc, + }}, + RetryConfig: vhrc, + }}, + } + } + defaultRetryBackoff = RetryBackoff{BaseInterval: 
25 * time.Millisecond, MaxInterval: 250 * time.Millisecond} + ) + + oldRLS := envconfig.XDSRLS + defer func() { + envconfig.XDSRLS = oldRLS + }() + + tests := []struct { + name string + rc *v3routepb.RouteConfiguration + wantUpdate RouteConfigUpdate + wantError bool + rlsEnabled bool + }{ + { + name: "default-route-match-field-is-nil", + rc: &v3routepb.RouteConfiguration{ + VirtualHosts: []*v3routepb.VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }, + }, + }, + }, + }, + }, + }, + wantError: true, + }, + { + name: "default-route-match-field-is-non-nil", + rc: &v3routepb.RouteConfiguration{ + VirtualHosts: []*v3routepb.VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{}, + Action: &v3routepb.Route_Route{}, + }, + }, + }, + }, + }, + wantError: true, + }, + { + name: "default-route-routeaction-field-is-nil", + rc: &v3routepb.RouteConfiguration{ + VirtualHosts: []*v3routepb.VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{}}, + }, + }, + }, + wantError: true, + }, + { + name: "default-route-cluster-field-is-empty", + rc: &v3routepb.RouteConfiguration{ + VirtualHosts: []*v3routepb.VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_ClusterHeader{}, + }, + }, + }, + }, + }, + }, + }, + wantError: true, + }, + { + // default route's match sets case-sensitive to false. 
+ name: "good-route-config-but-with-casesensitive-false", + rc: &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + CaseSensitive: &wrapperspb.BoolValue{Value: false}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }}}}}}}, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP("/"), + CaseInsensitive: true, + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + }, + }, + }, + { + name: "good-route-config-with-empty-string-route", + rc: &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, + }, + }, + }, + }, + }, + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }, + }, + }, + }, + }, + }, + }, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + { + Domains: []string{ldsTarget}, + Routes: 
[]*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + }, + }, + }, + { + // default route's match is not empty string, but "/". + name: "good-route-config-with-slash-string-route", + rc: &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + }, + }, + }, + }, + }, + }, + }, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP("/"), + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + }, + }, + }, + { + name: "good-route-config-with-weighted_clusters", + rc: &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "a", Weight: &wrapperspb.UInt32Value{Value: 2}}, + {Name: "b", Weight: &wrapperspb.UInt32Value{Value: 3}}, + {Name: "c", Weight: &wrapperspb.UInt32Value{Value: 5}}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]WeightedCluster{ + "a": {Weight: 2}, + "b": {Weight: 3}, + "c": 
{Weight: 5}, + }, + ActionType: RouteActionRoute, + }}, + }, + }, + }, + }, + { + name: "good-route-config-with-max-stream-duration", + rc: &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + MaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{MaxStreamDuration: durationpb.New(time.Second)}, + }, + }, + }, + }, + }, + }, + }, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + MaxStreamDuration: newDurationP(time.Second), + ActionType: RouteActionRoute, + }}, + }, + }, + }, + }, + { + name: "good-route-config-with-grpc-timeout-header-max", + rc: &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + MaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{GrpcTimeoutHeaderMax: durationpb.New(time.Second)}, + }, + }, + }, + }, + }, + }, + }, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + MaxStreamDuration: newDurationP(time.Second), + ActionType: RouteActionRoute, + }}, + }, + }, + }, + }, + { + name: "good-route-config-with-both-timeouts", 
+ rc: &v3routepb.RouteConfiguration{ + Name: routeName, + VirtualHosts: []*v3routepb.VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}, + MaxStreamDuration: &v3routepb.RouteAction_MaxStreamDuration{MaxStreamDuration: durationpb.New(2 * time.Second), GrpcTimeoutHeaderMax: durationpb.New(0)}, + }, + }, + }, + }, + }, + }, + }, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{ldsTarget}, + Routes: []*Route{{ + Prefix: newStringP("/"), + WeightedClusters: map[string]WeightedCluster{clusterName: {Weight: 1}}, + MaxStreamDuration: newDurationP(0), + ActionType: RouteActionRoute, + }}, + }, + }, + }, + }, + { + name: "good-route-config-with-http-filter-config", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": customFilterConfig}), + wantUpdate: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterConfig}}), + }, + { + name: "good-route-config-with-http-filter-config-in-old-typed-struct", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedCustomFilterOldTypedStructConfig}), + wantUpdate: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterOldTypedStructConfig}}), + }, + { + name: "good-route-config-with-http-filter-config-in-new-typed-struct", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedCustomFilterNewTypedStructConfig}), + wantUpdate: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterNewTypedStructConfig}}), + }, + { + name: "good-route-config-with-optional-http-filter-config", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": 
wrappedOptionalFilter("custom.filter")}), + wantUpdate: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterConfig}}), + }, + { + name: "good-route-config-with-http-err-filter-config", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": errFilterConfig}), + wantError: true, + }, + { + name: "good-route-config-with-http-optional-err-filter-config", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("err.custom.filter")}), + wantError: true, + }, + { + name: "good-route-config-with-http-unknown-filter-config", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": unknownFilterConfig}), + wantError: true, + }, + { + name: "good-route-config-with-http-optional-unknown-filter-config", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("unknown.custom.filter")}), + wantUpdate: goodUpdateWithFilterConfigs(nil), + }, + { + name: "good-route-config-with-bad-rbac-http-filter-configuration", + rc: goodRouteConfigWithFilterConfigs(map[string]*anypb.Any{"rbac": testutils.MarshalAny(&v3rbacpb.RBACPerRoute{Rbac: &v3rbacpb.RBAC{ + Rules: &rpb.RBAC{ + Action: rpb.RBAC_ALLOW, + Policies: map[string]*rpb.Policy{ + "certain-destination-ip": { + Permissions: []*rpb.Permission{ + {Rule: &rpb.Permission_DestinationIp{DestinationIp: &v3corepb.CidrRange{AddressPrefix: "not a correct address", PrefixLen: &wrapperspb.UInt32Value{Value: uint32(10)}}}}, + }, + Principals: []*rpb.Principal{ + {Identifier: &rpb.Principal_Any{Any: true}}, + }, + }, + }, + }, + }})}), + wantError: true, + }, + { + name: "good-route-config-with-retry-policy", + rc: goodRouteConfigWithRetryPolicy( + &v3routepb.RetryPolicy{RetryOn: "cancelled"}, + &v3routepb.RetryPolicy{RetryOn: "deadline-exceeded,unsupported", NumRetries: &wrapperspb.UInt32Value{Value: 2}}), + wantUpdate: goodUpdateWithRetryPolicy( + &RetryConfig{RetryOn: map[codes.Code]bool{codes.Canceled: 
true}, NumRetries: 1, RetryBackoff: defaultRetryBackoff}, + &RetryConfig{RetryOn: map[codes.Code]bool{codes.DeadlineExceeded: true}, NumRetries: 2, RetryBackoff: defaultRetryBackoff}), + }, + { + name: "good-route-config-with-retry-backoff", + rc: goodRouteConfigWithRetryPolicy( + &v3routepb.RetryPolicy{RetryOn: "internal", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{BaseInterval: durationpb.New(10 * time.Millisecond), MaxInterval: durationpb.New(10 * time.Millisecond)}}, + &v3routepb.RetryPolicy{RetryOn: "resource-exhausted", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{BaseInterval: durationpb.New(10 * time.Millisecond)}}), + wantUpdate: goodUpdateWithRetryPolicy( + &RetryConfig{RetryOn: map[codes.Code]bool{codes.Internal: true}, NumRetries: 1, RetryBackoff: RetryBackoff{BaseInterval: 10 * time.Millisecond, MaxInterval: 10 * time.Millisecond}}, + &RetryConfig{RetryOn: map[codes.Code]bool{codes.ResourceExhausted: true}, NumRetries: 1, RetryBackoff: RetryBackoff{BaseInterval: 10 * time.Millisecond, MaxInterval: 100 * time.Millisecond}}), + }, + { + name: "bad-retry-policy-0-retries", + rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "cancelled", NumRetries: &wrapperspb.UInt32Value{Value: 0}}, nil), + wantUpdate: RouteConfigUpdate{}, + wantError: true, + }, + { + name: "bad-retry-policy-0-base-interval", + rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "cancelled", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{BaseInterval: durationpb.New(0)}}, nil), + wantUpdate: RouteConfigUpdate{}, + wantError: true, + }, + { + name: "bad-retry-policy-negative-max-interval", + rc: goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "cancelled", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{MaxInterval: durationpb.New(-time.Second)}}, nil), + wantUpdate: RouteConfigUpdate{}, + wantError: true, + }, + { + name: "bad-retry-policy-negative-max-interval-no-known-retry-on", + rc: 
goodRouteConfigWithRetryPolicy(&v3routepb.RetryPolicy{RetryOn: "something", RetryBackOff: &v3routepb.RetryPolicy_RetryBackOff{MaxInterval: durationpb.New(-time.Second)}}, nil), + wantUpdate: RouteConfigUpdate{}, + wantError: true, + }, + { + name: "cluster-specifier-declared-which-not-registered", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist, false), + }, []string{"cspA"}), + wantError: true, + rlsEnabled: true, + }, + { + name: "error-in-cluster-specifier-plugin-conversion-method", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", errorClusterSpecifierConfig, false), + }, []string{"cspA"}), + wantError: true, + rlsEnabled: true, + }, + { + name: "route-action-that-references-undeclared-cluster-specifier-plugin", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false), + }, []string{"cspA", "cspB"}), + wantError: true, + rlsEnabled: true, + }, + { + name: "emitted-cluster-specifier-plugins", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false), + }, []string{"cspA"}), + wantUpdate: goodUpdateWithClusterSpecifierPluginA, + rlsEnabled: true, + }, + { + name: "deleted-cluster-specifier-plugins-not-referenced", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false), + clusterSpecifierPlugin("cspB", mockClusterSpecifierConfig, false), + }, []string{"cspA"}), + wantUpdate: goodUpdateWithClusterSpecifierPluginA, + rlsEnabled: true, + }, + { + name: "ignore-error-in-cluster-specifier-plugin-env-var-off", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + 
clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist, false), + }, []string{}), + wantUpdate: goodUpdate, + }, + { + name: "cluster-specifier-plugin-referenced-env-var-off", + rc: goodRouteConfigWithClusterSpecifierPlugins([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", mockClusterSpecifierConfig, false), + }, []string{"cspA"}), + wantUpdate: goodUpdate, + }, + // This tests a scenario where a cluster specifier plugin is not found + // and is optional. Any routes referencing that not found optional + // cluster specifier plugin should be ignored. The config has two + // routes, and only one of them should be present in the update. + { + name: "cluster-specifier-plugin-not-found-and-optional-route-should-ignore", + rc: goodRouteConfigWithClusterSpecifierPluginsAndNormalRoute([]*v3routepb.ClusterSpecifierPlugin{ + clusterSpecifierPlugin("cspA", configOfClusterSpecifierDoesntExist, true), + }, []string{"cspA"}), + wantUpdate: goodUpdateWithNormalRoute, + rlsEnabled: true, + }, + // This tests a scenario where a route has an unsupported cluster + // specifier. Any routes with an unsupported cluster specifier should be + // ignored. The config has two routes, and only one of them should be + // present in the update. 
+ { + name: "unsupported-cluster-specifier-route-should-ignore", + rc: goodRouteConfigWithUnsupportedClusterSpecifier, + wantUpdate: goodUpdateWithNormalRoute, + rlsEnabled: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + envconfig.XDSRLS = test.rlsEnabled + gotUpdate, gotError := generateRDSUpdateFromRouteConfiguration(test.rc) + if (gotError != nil) != test.wantError || + !cmp.Equal(gotUpdate, test.wantUpdate, cmpopts.EquateEmpty(), + cmp.Transformer("FilterConfig", func(fc httpfilter.FilterConfig) string { + return fmt.Sprint(fc) + })) { + t.Errorf("generateRDSUpdateFromRouteConfiguration(%+v, %v) returned unexpected, diff (-want +got):\\n%s", test.rc, ldsTarget, cmp.Diff(test.wantUpdate, gotUpdate, cmpopts.EquateEmpty())) + } + }) + } +} + +var configOfClusterSpecifierDoesntExist = &anypb.Any{ + TypeUrl: "does.not.exist", + Value: []byte{1, 2, 3}, +} + +var mockClusterSpecifierConfig = &anypb.Any{ + TypeUrl: "mock.cluster.specifier.plugin", + Value: []byte{1, 2, 3}, +} + +var errorClusterSpecifierConfig = &anypb.Any{ + TypeUrl: "error.cluster.specifier.plugin", + Value: []byte{1, 2, 3}, +} + +func init() { + clusterspecifier.Register(mockClusterSpecifierPlugin{}) + clusterspecifier.Register(errorClusterSpecifierPlugin{}) +} + +type mockClusterSpecifierPlugin struct { +} + +func (mockClusterSpecifierPlugin) TypeURLs() []string { + return []string{"mock.cluster.specifier.plugin"} +} + +func (mockClusterSpecifierPlugin) ParseClusterSpecifierConfig(proto.Message) (clusterspecifier.BalancerConfig, error) { + return []map[string]interface{}{}, nil +} + +type errorClusterSpecifierPlugin struct{} + +func (errorClusterSpecifierPlugin) TypeURLs() []string { + return []string{"error.cluster.specifier.plugin"} +} + +func (errorClusterSpecifierPlugin) ParseClusterSpecifierConfig(proto.Message) (clusterspecifier.BalancerConfig, error) { + return nil, errors.New("error from cluster specifier conversion function") +} + +func (s) 
TestUnmarshalRouteConfig(t *testing.T) { + const ( + ldsTarget = "lds.target.good:1111" + uninterestingDomain = "uninteresting.domain" + uninterestingClusterName = "uninterestingClusterName" + v3RouteConfigName = "v3RouteConfig" + v3ClusterName = "v3Cluster" + ) + + var ( + v3VirtualHost = []*v3routepb.VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: uninterestingClusterName}, + }, + }, + }, + }, + }, + { + Domains: []string{ldsTarget}, + Routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: ""}}, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: v3ClusterName}, + }, + }, + }, + }, + }, + } + v3RouteConfig = testutils.MarshalAny(&v3routepb.RouteConfiguration{ + Name: v3RouteConfigName, + VirtualHosts: v3VirtualHost, + }) + ) + + tests := []struct { + name string + resource *anypb.Any + wantName string + wantUpdate RouteConfigUpdate + wantErr bool + }{ + { + name: "non-routeConfig resource type", + resource: &anypb.Any{TypeUrl: version.V3HTTPConnManagerURL}, + wantErr: true, + }, + { + name: "badly marshaled routeconfig resource", + resource: &anypb.Any{ + TypeUrl: version.V3RouteConfigURL, + Value: []byte{1, 2, 3, 4}, + }, + wantErr: true, + }, + { + name: "v3 routeConfig resource", + resource: v3RouteConfig, + wantName: v3RouteConfigName, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + { + Domains: []string{ldsTarget}, + Routes: 
[]*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + }, + Raw: v3RouteConfig, + }, + }, + { + name: "v3 routeConfig resource wrapped", + resource: testutils.MarshalAny(&v3discoverypb.Resource{Resource: v3RouteConfig}), + wantName: v3RouteConfigName, + wantUpdate: RouteConfigUpdate{ + VirtualHosts: []*VirtualHost{ + { + Domains: []string{uninterestingDomain}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{uninterestingClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + { + Domains: []string{ldsTarget}, + Routes: []*Route{{Prefix: newStringP(""), + WeightedClusters: map[string]WeightedCluster{v3ClusterName: {Weight: 1}}, + ActionType: RouteActionRoute}}, + }, + }, + Raw: v3RouteConfig, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + name, update, err := unmarshalRouteConfigResource(test.resource) + if (err != nil) != test.wantErr { + t.Errorf("unmarshalRouteConfigResource(%s), got err: %v, wantErr: %v", pretty.ToJSON(test.resource), err, test.wantErr) + } + if name != test.wantName { + t.Errorf("unmarshalRouteConfigResource(%s), got name: %s, want: %s", pretty.ToJSON(test.resource), name, test.wantName) + } + if diff := cmp.Diff(update, test.wantUpdate, cmpOpts); diff != "" { + t.Errorf("unmarshalRouteConfigResource(%s), got unexpected update, diff (-got +want): %v", pretty.ToJSON(test.resource), diff) + } + }) + } +} + +func (s) TestRoutesProtoToSlice(t *testing.T) { + sm, _ := matcher.StringMatcherFromProto(&v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "tv"}}) + var ( + goodRouteWithFilterConfigs = func(cfgs map[string]*anypb.Any) []*v3routepb.Route { + // Sets per-filter config in cluster "B" and in the route. 
+ return []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + CaseSensitive: &wrapperspb.BoolValue{Value: false}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}, TypedPerFilterConfig: cfgs}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}}}, + TypedPerFilterConfig: cfgs, + }} + } + goodUpdateWithFilterConfigs = func(cfgs map[string]httpfilter.FilterConfig) []*Route { + // Sets per-filter config in cluster "B" and in the route. + return []*Route{{ + Prefix: newStringP("/"), + CaseInsensitive: true, + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60, HTTPFilterConfigOverride: cfgs}}, + HTTPFilterConfigOverride: cfgs, + ActionType: RouteActionRoute, + }} + } + ) + + tests := []struct { + name string + routes []*v3routepb.Route + wantRoutes []*Route + wantErr bool + }{ + { + name: "no path", + routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{}, + }}, + wantErr: true, + }, + { + name: "case_sensitive is false", + routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + CaseSensitive: &wrapperspb.BoolValue{Value: false}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}}}, + }}, + wantRoutes: []*Route{{ + Prefix: newStringP("/"), + CaseInsensitive: true, + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": 
{Weight: 60}}, + ActionType: RouteActionRoute, + }}, + }, + { + name: "good", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + Headers: []*v3routepb.HeaderMatcher{ + { + Name: "th", + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{ + PrefixMatch: "tv", + }, + InvertMatch: true, + }, + }, + RuntimeFraction: &v3corepb.RuntimeFractionalPercent{ + DefaultValue: &v3typepb.FractionalPercent{ + Numerator: 1, + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}}}, + }, + }, + wantRoutes: []*Route{{ + Prefix: newStringP("/a/"), + Headers: []*HeaderMatcher{ + { + Name: "th", + InvertMatch: newBoolP(true), + PrefixMatch: newStringP("tv"), + }, + }, + Fraction: newUInt32P(10000), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + ActionType: RouteActionRoute, + }}, + wantErr: false, + }, + { + name: "good with regex matchers", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: "/a/"}}, + Headers: []*v3routepb.HeaderMatcher{ + { + Name: "th", + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: "tv"}}, + }, + }, + RuntimeFraction: &v3corepb.RuntimeFractionalPercent{ + DefaultValue: &v3typepb.FractionalPercent{ + Numerator: 1, + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + 
WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}}}, + }, + }, + wantRoutes: []*Route{{ + Regex: func() *regexp.Regexp { return regexp.MustCompile("/a/") }(), + Headers: []*HeaderMatcher{ + { + Name: "th", + InvertMatch: newBoolP(false), + RegexMatch: func() *regexp.Regexp { return regexp.MustCompile("tv") }(), + }, + }, + Fraction: newUInt32P(10000), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + ActionType: RouteActionRoute, + }}, + wantErr: false, + }, + { + name: "good with string matcher", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: "/a/"}}, + Headers: []*v3routepb.HeaderMatcher{ + { + Name: "th", + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_StringMatch{StringMatch: &v3matcherpb.StringMatcher{MatchPattern: &v3matcherpb.StringMatcher_Exact{Exact: "tv"}}}, + }, + }, + RuntimeFraction: &v3corepb.RuntimeFractionalPercent{ + DefaultValue: &v3typepb.FractionalPercent{ + Numerator: 1, + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}}}, + }, + }, + wantRoutes: []*Route{{ + Regex: func() *regexp.Regexp { return regexp.MustCompile("/a/") }(), + Headers: []*HeaderMatcher{ + { + Name: "th", + InvertMatch: newBoolP(false), + StringMatch: &sm, + }, + }, + Fraction: newUInt32P(10000), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + 
ActionType: RouteActionRoute, + }}, + wantErr: false, + }, + { + name: "query is ignored", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}}}, + }, + { + Name: "with_query", + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/b/"}, + QueryParameters: []*v3routepb.QueryParameterMatcher{{Name: "route_will_be_ignored"}}, + }, + }, + }, + // Only one route in the result, because the second one with query + // parameters is ignored. + wantRoutes: []*Route{{ + Prefix: newStringP("/a/"), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + ActionType: RouteActionRoute, + }}, + wantErr: false, + }, + { + name: "unrecognized path specifier", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_ConnectMatcher_{}, + }, + }, + }, + wantErr: true, + }, + { + name: "bad regex in path specifier", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_SafeRegex{SafeRegex: &v3matcherpb.RegexMatcher{Regex: "??"}}, + Headers: []*v3routepb.HeaderMatcher{ + { + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{PrefixMatch: "tv"}, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}}, + }, + }, + }, + wantErr: true, + }, + { + name: "bad regex in header specifier", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: 
&v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + Headers: []*v3routepb.HeaderMatcher{ + { + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_SafeRegexMatch{SafeRegexMatch: &v3matcherpb.RegexMatcher{Regex: "??"}}, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: clusterName}}, + }, + }, + }, + wantErr: true, + }, + { + name: "unrecognized header match specifier", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + Headers: []*v3routepb.HeaderMatcher{ + { + Name: "th", + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_StringMatch{}, + }, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "no cluster in weighted clusters action", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{}}}}, + }, + }, + wantErr: true, + }, + { + name: "all 0-weight clusters in weighted clusters action", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 0}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 0}}, + }, + }}}}, + }, + }, + wantErr: true, + }, + { + name: "The sum of all weighted clusters is more than uint32", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: 
&v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: math.MaxUint32}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: math.MaxUint32}}, + }, + }}}}, + }, + }, + wantErr: true, + }, + { + name: "unsupported cluster specifier", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_ClusterSpecifierPlugin{}}}, + }, + }, + wantErr: true, + }, + { + name: "default totalWeight is 100 in weighted clusters action", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}}}, + }, + }, + wantRoutes: []*Route{{ + Prefix: newStringP("/a/"), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + ActionType: RouteActionRoute, + }}, + wantErr: false, + }, + { + name: "default totalWeight is 100 in weighted clusters action", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 30}}, + 
{Name: "A", Weight: &wrapperspb.UInt32Value{Value: 20}}, + }, + }}}}, + }, + }, + wantRoutes: []*Route{{ + Prefix: newStringP("/a/"), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 20}, "B": {Weight: 30}}, + ActionType: RouteActionRoute, + }}, + wantErr: false, + }, + { + name: "good-with-channel-id-hash-policy", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + Headers: []*v3routepb.HeaderMatcher{ + { + Name: "th", + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{ + PrefixMatch: "tv", + }, + InvertMatch: true, + }, + }, + RuntimeFraction: &v3corepb.RuntimeFractionalPercent{ + DefaultValue: &v3typepb.FractionalPercent{ + Numerator: 1, + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}, + HashPolicy: []*v3routepb.RouteAction_HashPolicy{ + {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{FilterState: &v3routepb.RouteAction_HashPolicy_FilterState{Key: "io.grpc.channel_id"}}}, + }, + }}, + }, + }, + wantRoutes: []*Route{{ + Prefix: newStringP("/a/"), + Headers: []*HeaderMatcher{ + { + Name: "th", + InvertMatch: newBoolP(true), + PrefixMatch: newStringP("tv"), + }, + }, + Fraction: newUInt32P(10000), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + HashPolicies: []*HashPolicy{ + {HashPolicyType: HashPolicyTypeChannelID}, + }, + ActionType: RouteActionRoute, + }}, + wantErr: false, + }, + // This tests that policy.Regex ends up being nil if RegexRewrite is not + // set in xds response. 
+ { + name: "good-with-header-hash-policy-no-regex-specified", + routes: []*v3routepb.Route{ + { + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/a/"}, + Headers: []*v3routepb.HeaderMatcher{ + { + Name: "th", + HeaderMatchSpecifier: &v3routepb.HeaderMatcher_PrefixMatch{ + PrefixMatch: "tv", + }, + InvertMatch: true, + }, + }, + RuntimeFraction: &v3corepb.RuntimeFractionalPercent{ + DefaultValue: &v3typepb.FractionalPercent{ + Numerator: 1, + Denominator: v3typepb.FractionalPercent_HUNDRED, + }, + }, + }, + Action: &v3routepb.Route_Route{ + Route: &v3routepb.RouteAction{ + ClusterSpecifier: &v3routepb.RouteAction_WeightedClusters{ + WeightedClusters: &v3routepb.WeightedCluster{ + Clusters: []*v3routepb.WeightedCluster_ClusterWeight{ + {Name: "B", Weight: &wrapperspb.UInt32Value{Value: 60}}, + {Name: "A", Weight: &wrapperspb.UInt32Value{Value: 40}}, + }, + }}, + HashPolicy: []*v3routepb.RouteAction_HashPolicy{ + {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{Header: &v3routepb.RouteAction_HashPolicy_Header{HeaderName: ":path"}}}, + }, + }}, + }, + }, + wantRoutes: []*Route{{ + Prefix: newStringP("/a/"), + Headers: []*HeaderMatcher{ + { + Name: "th", + InvertMatch: newBoolP(true), + PrefixMatch: newStringP("tv"), + }, + }, + Fraction: newUInt32P(10000), + WeightedClusters: map[string]WeightedCluster{"A": {Weight: 40}, "B": {Weight: 60}}, + HashPolicies: []*HashPolicy{ + {HashPolicyType: HashPolicyTypeHeader, + HeaderName: ":path"}, + }, + ActionType: RouteActionRoute, + }}, + wantErr: false, + }, + { + name: "with custom HTTP filter config", + routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": customFilterConfig}), + wantRoutes: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterConfig}}), + }, + { + name: "with custom HTTP filter config in typed struct", + routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": 
wrappedCustomFilterOldTypedStructConfig}), + wantRoutes: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterOldTypedStructConfig}}), + }, + { + name: "with optional custom HTTP filter config", + routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("custom.filter")}), + wantRoutes: goodUpdateWithFilterConfigs(map[string]httpfilter.FilterConfig{"foo": filterConfig{Override: customFilterConfig}}), + }, + { + name: "with erroring custom HTTP filter config", + routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": errFilterConfig}), + wantErr: true, + }, + { + name: "with optional erroring custom HTTP filter config", + routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("err.custom.filter")}), + wantErr: true, + }, + { + name: "with unknown custom HTTP filter config", + routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": unknownFilterConfig}), + wantErr: true, + }, + { + name: "with optional unknown custom HTTP filter config", + routes: goodRouteWithFilterConfigs(map[string]*anypb.Any{"foo": wrappedOptionalFilter("unknown.custom.filter")}), + wantRoutes: goodUpdateWithFilterConfigs(nil), + }, + } + + cmpOpts := []cmp.Option{ + cmp.AllowUnexported(Route{}, HeaderMatcher{}, Int64Range{}, regexp.Regexp{}), + cmpopts.EquateEmpty(), + cmp.Transformer("FilterConfig", func(fc httpfilter.FilterConfig) string { + return fmt.Sprint(fc) + }), + } + oldRingHashSupport := envconfig.XDSRingHash + envconfig.XDSRingHash = true + defer func() { envconfig.XDSRingHash = oldRingHashSupport }() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, _, err := routesProtoToSlice(tt.routes, nil) + if (err != nil) != tt.wantErr { + t.Fatalf("routesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) + } + if diff := cmp.Diff(got, tt.wantRoutes, cmpOpts...); diff != "" { + t.Fatalf("routesProtoToSlice() returned unexpected diff (-got 
+want):\n%s", diff) + } + }) + } +} + +func (s) TestHashPoliciesProtoToSlice(t *testing.T) { + tests := []struct { + name string + hashPolicies []*v3routepb.RouteAction_HashPolicy + wantHashPolicies []*HashPolicy + wantErr bool + }{ + // header-hash-policy tests a basic hash policy that specifies to hash a + // certain header. + { + name: "header-hash-policy", + hashPolicies: []*v3routepb.RouteAction_HashPolicy{ + { + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ + Header: &v3routepb.RouteAction_HashPolicy_Header{ + HeaderName: ":path", + RegexRewrite: &v3matcherpb.RegexMatchAndSubstitute{ + Pattern: &v3matcherpb.RegexMatcher{Regex: "/products"}, + Substitution: "/products", + }, + }, + }, + }, + }, + wantHashPolicies: []*HashPolicy{ + { + HashPolicyType: HashPolicyTypeHeader, + HeaderName: ":path", + Regex: func() *regexp.Regexp { return regexp.MustCompile("/products") }(), + RegexSubstitution: "/products", + }, + }, + }, + // channel-id-hash-policy tests a basic hash policy that specifies to + // hash a unique identifier of the channel. + { + name: "channel-id-hash-policy", + hashPolicies: []*v3routepb.RouteAction_HashPolicy{ + {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{FilterState: &v3routepb.RouteAction_HashPolicy_FilterState{Key: "io.grpc.channel_id"}}}, + }, + wantHashPolicies: []*HashPolicy{ + {HashPolicyType: HashPolicyTypeChannelID}, + }, + }, + // wrong-filter-state-key tests that an unsupported key in the + // filter state hash policy is treated as a no-op. + { + name: "wrong-filter-state-key", + hashPolicies: []*v3routepb.RouteAction_HashPolicy{ + {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{FilterState: &v3routepb.RouteAction_HashPolicy_FilterState{Key: "unsupported key"}}}, + }, + }, + // no-op-hash-policy tests that hash policies that are not supported by + // grpc are treated as a no-op. 
+ { + name: "no-op-hash-policy", + hashPolicies: []*v3routepb.RouteAction_HashPolicy{ + {PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{}}, + }, + }, + // header-and-channel-id-hash-policy test that a list of header and + // channel id hash policies are successfully converted to an internal + // struct. + { + name: "header-and-channel-id-hash-policy", + hashPolicies: []*v3routepb.RouteAction_HashPolicy{ + { + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{ + Header: &v3routepb.RouteAction_HashPolicy_Header{ + HeaderName: ":path", + RegexRewrite: &v3matcherpb.RegexMatchAndSubstitute{ + Pattern: &v3matcherpb.RegexMatcher{Regex: "/products"}, + Substitution: "/products", + }, + }, + }, + }, + { + PolicySpecifier: &v3routepb.RouteAction_HashPolicy_FilterState_{FilterState: &v3routepb.RouteAction_HashPolicy_FilterState{Key: "io.grpc.channel_id"}}, + Terminal: true, + }, + }, + wantHashPolicies: []*HashPolicy{ + { + HashPolicyType: HashPolicyTypeHeader, + HeaderName: ":path", + Regex: func() *regexp.Regexp { return regexp.MustCompile("/products") }(), + RegexSubstitution: "/products", + }, + { + HashPolicyType: HashPolicyTypeChannelID, + Terminal: true, + }, + }, + }, + } + + oldRingHashSupport := envconfig.XDSRingHash + envconfig.XDSRingHash = true + defer func() { envconfig.XDSRingHash = oldRingHashSupport }() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := hashPoliciesProtoToSlice(tt.hashPolicies) + if (err != nil) != tt.wantErr { + t.Fatalf("hashPoliciesProtoToSlice() error = %v, wantErr %v", err, tt.wantErr) + } + if diff := cmp.Diff(got, tt.wantHashPolicies, cmp.AllowUnexported(regexp.Regexp{})); diff != "" { + t.Fatalf("hashPoliciesProtoToSlice() returned unexpected diff (-got +want):\n%s", diff) + } + }) + } +} + +func newStringP(s string) *string { + return &s +} + +func newUInt32P(i uint32) *uint32 { + return &i +} + +func newBoolP(b bool) *bool { + return &b +} + +func newDurationP(d time.Duration) 
*time.Duration { + return &d +} diff --git a/xds/internal/xdsclient/xdsresource/version/version.go b/xds/internal/xdsclient/xdsresource/version/version.go new file mode 100644 index 000000000000..82ad5fe52c70 --- /dev/null +++ b/xds/internal/xdsclient/xdsresource/version/version.go @@ -0,0 +1,41 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package version defines constants to distinguish between supported xDS API +// versions. +package version + +// Resource URLs. We need to be able to accept either version of the resource +// regardless of the version of the transport protocol in use. 
+const ( + googleapiPrefix = "type.googleapis.com/" + + V3ListenerType = "envoy.config.listener.v3.Listener" + V3RouteConfigType = "envoy.config.route.v3.RouteConfiguration" + V3ClusterType = "envoy.config.cluster.v3.Cluster" + V3EndpointsType = "envoy.config.endpoint.v3.ClusterLoadAssignment" + + V3ResourceWrapperURL = googleapiPrefix + "envoy.service.discovery.v3.Resource" + V3ListenerURL = googleapiPrefix + V3ListenerType + V3RouteConfigURL = googleapiPrefix + V3RouteConfigType + V3ClusterURL = googleapiPrefix + V3ClusterType + V3EndpointsURL = googleapiPrefix + V3EndpointsType + V3HTTPConnManagerURL = googleapiPrefix + "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" + V3UpstreamTLSContextURL = googleapiPrefix + "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext" + V3DownstreamTLSContextURL = googleapiPrefix + "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" +) diff --git a/xds/server.go b/xds/server.go index 805f59b4f5ac..c346c6a47c1b 100644 --- a/xds/server.go +++ b/xds/server.go @@ -23,87 +23,76 @@ import ( "errors" "fmt" "net" - "strings" "sync" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" "google.golang.org/grpc/xds/internal/server" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + 
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) const serverPrefix = "[xds-server %p] " var ( // These new functions will be overridden in unit tests. - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsclient.XDSClient, func(), error) { return xdsclient.New() } - newGRPCServer = func(opts ...grpc.ServerOption) grpcServerInterface { + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { return grpc.NewServer(opts...) } - // Unexported function to retrieve transport credentials from a gRPC server. - grpcGetServerCreds = internal.GetServerCredentials.(func(*grpc.Server) credentials.TransportCredentials) - logger = grpclog.Component("xds") + grpcGetServerCreds = internal.GetServerCredentials.(func(*grpc.Server) credentials.TransportCredentials) + drainServerTransports = internal.DrainServerTransports.(func(*grpc.Server, string)) + logger = grpclog.Component("xds") ) -func prefixLogger(p *GRPCServer) *internalgrpclog.PrefixLogger { - return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, p)) -} - -// xdsClientInterface contains methods from xdsClient.Client which are used by -// the server. This is useful for overriding in unit tests. -type xdsClientInterface interface { - WatchListener(string, func(xdsclient.ListenerUpdate, error)) func() - BootstrapConfig() *bootstrap.Config - Close() -} - -// grpcServerInterface contains methods from grpc.Server which are used by the +// grpcServer contains methods from grpc.Server which are used by the // GRPCServer type here. This is useful for overriding in unit tests. -type grpcServerInterface interface { +type grpcServer interface { RegisterService(*grpc.ServiceDesc, interface{}) Serve(net.Listener) error Stop() GracefulStop() + GetServiceInfo() map[string]grpc.ServiceInfo } // GRPCServer wraps a gRPC server and provides server-side xDS functionality, by // communication with a management server using xDS APIs. 
It implements the // grpc.ServiceRegistrar interface and can be passed to service registration // functions in IDL generated code. -// -// Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. type GRPCServer struct { - gs grpcServerInterface + gs grpcServer quit *grpcsync.Event logger *internalgrpclog.PrefixLogger xdsCredsInUse bool + opts *serverOptions // clientMu is used only in initXDSClient(), which is called at the // beginning of Serve(), where we have to decide if we have to create a // client or use an existing one. - clientMu sync.Mutex - xdsC xdsClientInterface + clientMu sync.Mutex + xdsC xdsclient.XDSClient + xdsClientClose func() } // NewGRPCServer creates an xDS-enabled gRPC server using the passed in opts. // The underlying gRPC server has no service registered and has not started to // accept requests yet. -// -// Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { newOpts := []grpc.ServerOption{ grpc.ChainUnaryInterceptor(xdsUnaryInterceptor), @@ -114,8 +103,9 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { gs: newGRPCServer(newOpts...), quit: grpcsync.NewEvent(), } - s.logger = prefixLogger(s) + s.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(serverPrefix, s)) s.logger.Infof("Created xds.GRPCServer") + s.handleServerOptions(opts) // We type assert our underlying gRPC server to the real grpc.Server here // before trying to retrieve the configured credentials. This approach @@ -133,6 +123,39 @@ func NewGRPCServer(opts ...grpc.ServerOption) *GRPCServer { return s } +// handleServerOptions iterates through the list of server options passed in by +// the user, and handles the xDS server specific options. 
+func (s *GRPCServer) handleServerOptions(opts []grpc.ServerOption) { + so := s.defaultServerOptions() + for _, opt := range opts { + if o, ok := opt.(*serverOption); ok { + o.apply(so) + } + } + s.opts = so +} + +func (s *GRPCServer) defaultServerOptions() *serverOptions { + return &serverOptions{ + // A default serving mode change callback which simply logs at the + // default-visible log level. This will be used if the application does not + // register a mode change callback. + // + // Note that this means that `s.opts.modeCallback` will never be nil and can + // safely be invoked directly from `handleServingModeChanges`. + modeCallback: s.loggingServerModeChangeCallback, + } +} + +func (s *GRPCServer) loggingServerModeChangeCallback(addr net.Addr, args ServingModeChangeArgs) { + switch args.Mode { + case connectivity.ServingModeServing: + s.logger.Errorf("Listener %q entering mode: %q", addr.String(), args.Mode) + case connectivity.ServingModeNotServing: + s.logger.Errorf("Listener %q entering mode: %q due to error: %v", addr.String(), args.Mode, args.Err) + } +} + // RegisterService registers a service and its implementation to the underlying // gRPC server. It is called from the IDL generated code. This must be called // before invoking Serve. @@ -140,21 +163,38 @@ func (s *GRPCServer) RegisterService(sd *grpc.ServiceDesc, ss interface{}) { s.gs.RegisterService(sd, ss) } +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *GRPCServer) GetServiceInfo() map[string]grpc.ServiceInfo { + return s.gs.GetServiceInfo() +} + // initXDSClient creates a new xdsClient if there is no existing one available. 
func (s *GRPCServer) initXDSClient() error { s.clientMu.Lock() defer s.clientMu.Unlock() + if s.quit.HasFired() { + return grpc.ErrServerStopped + } if s.xdsC != nil { return nil } - client, err := newXDSClient() + newXDSClient := newXDSClient + if s.opts.bootstrapContentsForTesting != nil { + // Bootstrap file contents may be specified as a server option for tests. + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + return xdsclient.NewWithBootstrapContentsForTesting(s.opts.bootstrapContentsForTesting) + } + } + + client, close, err := newXDSClient() if err != nil { return fmt.Errorf("xds: failed to create xds-client: %v", err) } s.xdsC = client - s.logger.Infof("Created an xdsClient") + s.xdsClientClose = close return nil } @@ -165,7 +205,6 @@ func (s *GRPCServer) initXDSClient() error { // initiated here. // // Serve will return a non-nil error unless Stop or GracefulStop is called. -// TODO: Support callback to get notified on serving state changes. func (s *GRPCServer) Serve(lis net.Listener) error { s.logger.Infof("Serve() passed a net.Listener on %s", lis.Addr().String()) if _, ok := lis.Addr().(*net.TCPAddr); !ok { @@ -177,7 +216,6 @@ func (s *GRPCServer) Serve(lis net.Listener) error { if err := s.initXDSClient(); err != nil { return err } - cfg := s.xdsC.BootstrapConfig() if cfg == nil { return errors.New("bootstrap configuration is empty") @@ -202,10 +240,12 @@ func (s *GRPCServer) Serve(lis net.Listener) error { if cfg.ServerListenerResourceNameTemplate == "" { return errors.New("missing server_listener_resource_name_template in the bootstrap configuration") } - name := cfg.ServerListenerResourceNameTemplate - if strings.Contains(cfg.ServerListenerResourceNameTemplate, "%s") { - name = strings.Replace(cfg.ServerListenerResourceNameTemplate, "%s", lis.Addr().String(), -1) - } + name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate, lis.Addr().String()) + + modeUpdateCh := buffer.NewUnbounded() + go func() { + 
s.handleServingModeChanges(modeUpdateCh) + }() // Create a listenerWrapper which handles all functionality required by // this particular instance of Serve(). @@ -214,6 +254,18 @@ func (s *GRPCServer) Serve(lis net.Listener) error { ListenerResourceName: name, XDSCredsInUse: s.xdsCredsInUse, XDSClient: s.xdsC, + ModeCallback: func(addr net.Addr, mode connectivity.ServingMode, err error) { + modeUpdateCh.Put(&modeChangeArgs{ + addr: addr, + mode: mode, + err: err, + }) + }, + DrainCallback: func(addr net.Addr) { + if gs, ok := s.gs.(*grpc.Server); ok { + drainServerTransports(gs, addr.String()) + } + }, }) // Block until a good LDS response is received or the server is stopped. @@ -223,21 +275,72 @@ func (s *GRPCServer) Serve(lis net.Listener) error { // need to explicitly close the listener. Cancellation of the xDS watch // is handled by the listenerWrapper. lw.Close() + modeUpdateCh.Close() return nil case <-goodUpdateCh: } return s.gs.Serve(lw) } +// modeChangeArgs wraps argument required for invoking mode change callback. +type modeChangeArgs struct { + addr net.Addr + mode connectivity.ServingMode + err error +} + +// handleServingModeChanges runs as a separate goroutine, spawned from Serve(). +// It reads a channel on to which mode change arguments are pushed, and in turn +// invokes the user registered callback. It also calls an internal method on the +// underlying grpc.Server to gracefully close existing connections, if the +// listener moved to a "not-serving" mode. +func (s *GRPCServer) handleServingModeChanges(updateCh *buffer.Unbounded) { + for { + select { + case <-s.quit.Done(): + return + case u, ok := <-updateCh.Get(): + if !ok { + return + } + updateCh.Load() + args := u.(*modeChangeArgs) + if args.mode == connectivity.ServingModeNotServing { + // We type assert our underlying gRPC server to the real + // grpc.Server here before trying to initiate the drain + // operation. 
This approach avoids performing the same type + // assertion in the grpc package which provides the + // implementation for internal.GetServerCredentials, and allows + // us to use a fake gRPC server in tests. + if gs, ok := s.gs.(*grpc.Server); ok { + drainServerTransports(gs, args.addr.String()) + } + } + + // The XdsServer API will allow applications to register a "serving state" + // callback to be invoked when the server begins serving and when the + // server encounters errors that force it to be "not serving". If "not + // serving", the callback must be provided error information, for + // debugging use by developers - A36. + s.opts.modeCallback(args.addr, ServingModeChangeArgs{ + Mode: args.mode, + Err: args.err, + }) + } + } +} + // Stop stops the underlying gRPC server. It immediately closes all open // connections. It cancels all active RPCs on the server side and the // corresponding pending RPCs on the client side will get notified by connection // errors. func (s *GRPCServer) Stop() { + s.clientMu.Lock() + defer s.clientMu.Unlock() s.quit.Fire() s.gs.Stop() if s.xdsC != nil { - s.xdsC.Close() + s.xdsClientClose() } } @@ -245,25 +348,88 @@ func (s *GRPCServer) Stop() { // from accepting new connections and RPCs and blocks until all the pending RPCs // are finished. func (s *GRPCServer) GracefulStop() { + s.clientMu.Lock() + defer s.clientMu.Unlock() s.quit.Fire() s.gs.GracefulStop() if s.xdsC != nil { - s.xdsC.Close() + s.xdsClientClose() } } +// routeAndProcess routes the incoming RPC to a configured route in the route +// table and also processes the RPC by running the incoming RPC through any HTTP +// Filters configured. 
+func routeAndProcess(ctx context.Context) error { + conn := transport.GetConnection(ctx) + cw, ok := conn.(interface { + VirtualHosts() []xdsresource.VirtualHostWithInterceptors + }) + if !ok { + return errors.New("missing virtual hosts in incoming context") + } + mn, ok := grpc.Method(ctx) + if !ok { + return errors.New("missing method name in incoming context") + } + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return errors.New("missing metadata in incoming context") + } + // A41 added logic to the core grpc implementation to guarantee that once + // the RPC gets to this point, there will be a single, unambiguous authority + // present in the header map. + authority := md.Get(":authority") + vh := xdsresource.FindBestMatchingVirtualHostServer(authority[0], cw.VirtualHosts()) + if vh == nil { + return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Virtual Host") + } + + var rwi *xdsresource.RouteWithInterceptors + rpcInfo := iresolver.RPCInfo{ + Context: ctx, + Method: mn, + } + for _, r := range vh.Routes { + if r.M.Match(rpcInfo) { + // "NonForwardingAction is expected for all Routes used on server-side; a route with an inappropriate action causes + // RPCs matching that route to fail with UNAVAILABLE." - A36 + if r.ActionType != xdsresource.RouteActionNonForwardingAction { + return status.Error(codes.Unavailable, "the incoming RPC matched to a route that was not of action type non forwarding") + } + rwi = &r + break + } + } + if rwi == nil { + return status.Error(codes.Unavailable, "the incoming RPC did not match a configured Route") + } + for _, interceptor := range rwi.Interceptors { + if err := interceptor.AllowRPC(ctx); err != nil { + return status.Errorf(codes.PermissionDenied, "Incoming RPC is not allowed: %v", err) + } + } + return nil +} + // xdsUnaryInterceptor is the unary interceptor added to the gRPC server to // perform any xDS specific functionality on unary RPCs. -// -// This is a no-op at this point. 
func xdsUnaryInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + if envconfig.XDSRBAC { + if err := routeAndProcess(ctx); err != nil { + return nil, err + } + } return handler(ctx, req) } // xdsStreamInterceptor is the stream interceptor added to the gRPC server to // perform any xDS specific functionality on streaming RPCs. -// -// This is a no-op at this point. func xdsStreamInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if envconfig.XDSRBAC { + if err := routeAndProcess(ss.Context()); err != nil { + return err + } + } return handler(srv, ss) } diff --git a/xds/server_options.go b/xds/server_options.go new file mode 100644 index 000000000000..9b9700cf3b33 --- /dev/null +++ b/xds/server_options.go @@ -0,0 +1,76 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xds + +import ( + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" +) + +type serverOptions struct { + modeCallback ServingModeCallbackFunc + bootstrapContentsForTesting []byte +} + +type serverOption struct { + grpc.EmptyServerOption + apply func(*serverOptions) +} + +// ServingModeCallback returns a grpc.ServerOption which allows users to +// register a callback to get notified about serving mode changes. 
+func ServingModeCallback(cb ServingModeCallbackFunc) grpc.ServerOption { + return &serverOption{apply: func(o *serverOptions) { o.modeCallback = cb }} +} + +// ServingModeCallbackFunc is the callback that users can register to get +// notified about the server's serving mode changes. The callback is invoked +// with the address of the listener and its new mode. +// +// Users must not perform any blocking operations in this callback. +type ServingModeCallbackFunc func(addr net.Addr, args ServingModeChangeArgs) + +// ServingModeChangeArgs wraps the arguments passed to the serving mode callback +// function. +type ServingModeChangeArgs struct { + // Mode is the new serving mode of the server listener. + Mode connectivity.ServingMode + // Err is set to a non-nil error if the server has transitioned into + // not-serving mode. + Err error +} + +// BootstrapContentsForTesting returns a grpc.ServerOption which allows users +// to inject a bootstrap configuration used by only this server, instead of the +// global configuration from the environment variables. +// +// # Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func BootstrapContentsForTesting(contents []byte) grpc.ServerOption { + return &serverOption{apply: func(o *serverOptions) { o.bootstrapContentsForTesting = contents }} +} diff --git a/xds/server_test.go b/xds/server_test.go index f787a129057e..b91049a05fde 100644 --- a/xds/server_test.go +++ b/xds/server_test.go @@ -25,19 +25,31 @@ import ( "net" "reflect" "strings" + "sync" "testing" "time" "google.golang.org/grpc" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/credentials/xds" "google.golang.org/grpc/internal/grpctest" "google.golang.org/grpc/internal/testutils" - xdsclient "google.golang.org/grpc/xds/internal/client" - "google.golang.org/grpc/xds/internal/client/bootstrap" + "google.golang.org/grpc/internal/testutils/xds/e2e" + _ "google.golang.org/grpc/xds/internal/httpfilter/router" xdstestutils "google.golang.org/grpc/xds/internal/testutils" "google.golang.org/grpc/xds/internal/testutils/fakeclient" + "google.golang.org/grpc/xds/internal/xdsclient" + "google.golang.org/grpc/xds/internal/xdsclient/bootstrap" + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" + + v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" + v3listenerpb "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" + v3routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + v3httppb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + v3tlspb "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" ) const ( @@ -46,6 +58,68 @@ const ( testServerListenerResourceNameTemplate = "/path/to/resource/%s/%s" ) +var listenerWithFilterChains = &v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { 
+ FilterChainMatch: &v3listenerpb.FilterChainMatch{ + PrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourceType: v3listenerpb.FilterChainMatch_SAME_IP_OR_LOOPBACK, + SourcePrefixRanges: []*v3corepb.CidrRange{ + { + AddressPrefix: "192.168.0.0", + PrefixLen: &wrapperspb.UInt32Value{ + Value: uint32(16), + }, + }, + }, + SourcePorts: []uint32{80}, + }, + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + }), + }, + }, + }, + }, + }, +} + type s struct { grpctest.Tester } @@ -66,9 +140,10 @@ func (f *fakeGRPCServer) RegisterService(*grpc.ServiceDesc, interface{}) { f.registerServiceCh.Send(nil) } -func (f *fakeGRPCServer) Serve(net.Listener) error { +func (f *fakeGRPCServer) Serve(lis net.Listener) error { f.serveCh.Send(nil) <-f.done + lis.Close() return nil } @@ -81,6 +156,10 @@ func (f *fakeGRPCServer) GracefulStop() { 
f.gracefulStopCh.Send(nil) } +func (f *fakeGRPCServer) GetServiceInfo() map[string]grpc.ServiceInfo { + panic("implement me") +} + func newFakeGRPCServer() *fakeGRPCServer { return &fakeGRPCServer{ done: make(chan struct{}), @@ -128,7 +207,7 @@ func (s) TestNewServer(t *testing.T) { wantServerOpts := len(test.serverOpts) + 2 origNewGRPCServer := newGRPCServer - newGRPCServer = func(opts ...grpc.ServerOption) grpcServerInterface { + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { if got := len(opts); got != wantServerOpts { t.Fatalf("%d ServerOptions passed to grpc.Server, want %d", got, wantServerOpts) } @@ -156,7 +235,7 @@ func (s) TestRegisterService(t *testing.T) { fs := newFakeGRPCServer() origNewGRPCServer := newGRPCServer - newGRPCServer = func(opts ...grpc.ServerOption) grpcServerInterface { return fs } + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { return fs } defer func() { newGRPCServer = origNewGRPCServer }() s := NewGRPCServer() @@ -237,27 +316,44 @@ func (p *fakeProvider) Close() { p.Distributor.Stop() } +// setupClientOverride sets up an override for new xdsClient creation. +func setupClientOverride(t *testing.T) func() { + origNewXDSClient := newXDSClient + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + c := fakeclient.NewClient() + c.SetBootstrapConfig(&bootstrap.Config{ + XDSServer: xdstestutils.ServerConfigForAddress(t, "server-address"), + NodeProto: xdstestutils.EmptyNodeProtoV3, + ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, + CertProviderConfigs: certProviderConfigs, + }) + return c, func() {}, nil + } + return func() { + newXDSClient = origNewXDSClient + } +} + // setupOverrides sets up overrides for bootstrap config, new xdsClient creation // and new gRPC.Server creation. 
-func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { +func setupOverrides(t *testing.T) (*fakeGRPCServer, *testutils.Channel, func()) { clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsclient.XDSClient, func(), error) { c := fakeclient.NewClient() c.SetBootstrapConfig(&bootstrap.Config{ - BalancerName: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + XDSServer: xdstestutils.ServerConfigForAddress(t, "server-address"), NodeProto: xdstestutils.EmptyNodeProtoV3, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, CertProviderConfigs: certProviderConfigs, }) clientCh.Send(c) - return c, nil + return c, func() {}, nil } fs := newFakeGRPCServer() origNewGRPCServer := newGRPCServer - newGRPCServer = func(opts ...grpc.ServerOption) grpcServerInterface { return fs } + newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { return fs } return fs, clientCh, func() { newXDSClient = origNewXDSClient @@ -269,14 +365,13 @@ func setupOverrides() (*fakeGRPCServer, *testutils.Channel, func()) { // one. Tests that use xdsCredentials need a real grpc.Server instead of a fake // one, because the xDS-enabled server needs to read configured creds from the // underlying grpc.Server to confirm whether xdsCreds were configured. 
-func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, func()) { +func setupOverridesForXDSCreds(t *testing.T, includeCertProviderCfg bool) (*testutils.Channel, func()) { clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsclient.XDSClient, func(), error) { c := fakeclient.NewClient() bc := &bootstrap.Config{ - BalancerName: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + XDSServer: xdstestutils.ServerConfigForAddress(t, "server-address"), NodeProto: xdstestutils.EmptyNodeProtoV3, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, } @@ -285,7 +380,7 @@ func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, } c.SetBootstrapConfig(bc) clientCh.Send(c) - return c, nil + return c, func() {}, nil } return clientCh, func() { newXDSClient = origNewXDSClient } @@ -293,22 +388,29 @@ func setupOverridesForXDSCreds(includeCertProviderCfg bool) (*testutils.Channel, // TestServeSuccess tests the successful case of calling Serve(). // The following sequence of events happen: -// 1. Create a new GRPCServer and call Serve() in a goroutine. -// 2. Make sure an xdsClient is created, and an LDS watch is registered. -// 3. Push an error response from the xdsClient, and make sure that Serve() does -// not exit. -// 4. Push a good response from the xdsClient, and make sure that Serve() on the -// underlying grpc.Server is called. +// 1. Create a new GRPCServer and call Serve() in a goroutine. +// 2. Make sure an xdsClient is created, and an LDS watch is registered. +// 3. Push an error response from the xdsClient, and make sure that Serve() does +// not exit. +// 4. Push a good response from the xdsClient, and make sure that Serve() on the +// underlying grpc.Server is called. 
func (s) TestServeSuccess(t *testing.T) { - fs, clientCh, cleanup := setupOverrides() + fs, clientCh, cleanup := setupOverrides(t) defer cleanup() - server := NewGRPCServer() + // Create a new xDS-enabled gRPC server and pass it a server option to get + // notified about serving mode changes. + modeChangeCh := testutils.NewChannel() + modeChangeOption := ServingModeCallback(func(addr net.Addr, args ServingModeChangeArgs) { + t.Logf("server mode change callback invoked for listener %q with mode %q and error %v", addr.String(), args.Mode, args.Err) + modeChangeCh.Send(args.Mode) + }) + server := NewGRPCServer(modeChangeOption) defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } // Call Serve() in a goroutine, and push on a channel when Serve returns. @@ -341,37 +443,59 @@ func (s) TestServeSuccess(t *testing.T) { // Push an error to the registered listener watch callback and make sure // that Serve does not return. - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{}, errors.New("LDS error")) + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "LDS resource not found")) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded { t.Fatal("Serve() returned after a bad LDS response") } + // Make sure the serving mode changes appropriately. 
+ v, err := modeChangeCh.Receive(ctx) + if err != nil { + t.Fatalf("error when waiting for serving mode to change: %v", err) + } + if mode := v.(connectivity.ServingMode); mode != connectivity.ServingModeNotServing { + t.Fatalf("server mode is %q, want %q", mode, connectivity.ServingModeNotServing) + } + // Push a good LDS response, and wait for Serve() to be invoked on the // underlying grpc.Server. + fcm, err := xdsresource.NewFilterChainManager(listenerWithFilterChains) + if err != nil { + t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) + } addr, port := splitHostPort(lis.Addr().String()) - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ - Address: addr, - Port: port, + InboundListenerCfg: &xdsresource.InboundListenerConfig{ + Address: addr, + Port: port, + FilterChains: fcm, }, }, nil) if _, err := fs.serveCh.Receive(ctx); err != nil { t.Fatalf("error when waiting for Serve() to be invoked on the grpc.Server") } + // Make sure the serving mode changes appropriately. + v, err = modeChangeCh.Receive(ctx) + if err != nil { + t.Fatalf("error when waiting for serving mode to change: %v", err) + } + if mode := v.(connectivity.ServingMode); mode != connectivity.ServingModeServing { + t.Fatalf("server mode is %q, want %q", mode, connectivity.ServingModeServing) + } + // Push an update to the registered listener watch callback with a Listener // resource whose host:port does not match the actual listening address and - // port. Serve() should not return and should continue to use the old state. - // - // This will change once we add start tracking serving state. - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ + // port. This will push the listener to "not-serving" mode. 
+ client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ - Address: "10.20.30.40", - Port: "666", + InboundListenerCfg: &xdsresource.InboundListenerConfig{ + Address: "10.20.30.40", + Port: "666", + FilterChains: fcm, }, }, nil) sCtx, sCancel = context.WithTimeout(context.Background(), defaultTestShortTimeout) @@ -379,22 +503,32 @@ func (s) TestServeSuccess(t *testing.T) { if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded { t.Fatal("Serve() returned after a bad LDS response") } + + // Make sure the serving mode changes appropriately. + v, err = modeChangeCh.Receive(ctx) + if err != nil { + t.Fatalf("error when waiting for serving mode to change: %v", err) + } + if mode := v.(connectivity.ServingMode); mode != connectivity.ServingModeNotServing { + t.Fatalf("server mode is %q, want %q", mode, connectivity.ServingModeNotServing) + } } // TestServeWithStop tests the case where Stop() is called before an LDS update // is received. This should cause Serve() to exit before calling Serve() on the // underlying grpc.Server. func (s) TestServeWithStop(t *testing.T) { - fs, clientCh, cleanup := setupOverrides() + fs, clientCh, cleanup := setupOverrides(t) defer cleanup() // Note that we are not deferring the Stop() here since we explicitly call // it after the LDS watch has been registered. server := NewGRPCServer() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + server.Stop() + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } // Call Serve() in a goroutine, and push on a channel when Serve returns. 
@@ -411,6 +545,7 @@ func (s) TestServeWithStop(t *testing.T) { defer cancel() c, err := clientCh.Receive(ctx) if err != nil { + server.Stop() t.Fatalf("error when waiting for new xdsClient to be created: %v", err) } client := c.(*fakeclient.Client) @@ -451,9 +586,9 @@ func (s) TestServeBootstrapFailure(t *testing.T) { server := NewGRPCServer() defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } serveDone := testutils.NewChannel() @@ -484,8 +619,7 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { { desc: "certificate provider config is missing", bootstrapConfig: &bootstrap.Config{ - BalancerName: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + XDSServer: xdstestutils.ServerConfigForAddress(t, "server-address"), NodeProto: xdstestutils.EmptyNodeProtoV3, ServerListenerResourceNameTemplate: testServerListenerResourceNameTemplate, }, @@ -493,8 +627,7 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { { desc: "server_listener_resource_name_template is missing", bootstrapConfig: &bootstrap.Config{ - BalancerName: "dummyBalancer", - Creds: grpc.WithTransportCredentials(insecure.NewCredentials()), + XDSServer: xdstestutils.ServerConfigForAddress(t, "server-address"), NodeProto: xdstestutils.EmptyNodeProtoV3, CertProviderConfigs: certProviderConfigs, }, @@ -507,11 +640,11 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { // xdsClient with the specified bootstrap configuration. 
clientCh := testutils.NewChannel() origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { + newXDSClient = func() (xdsclient.XDSClient, func(), error) { c := fakeclient.NewClient() c.SetBootstrapConfig(test.bootstrapConfig) clientCh.Send(c) - return c, nil + return c, func() {}, nil } defer func() { newXDSClient = origNewXDSClient }() @@ -522,9 +655,9 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { server := NewGRPCServer(grpc.Creds(xdsCreds)) defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } serveDone := testutils.NewChannel() @@ -550,17 +683,17 @@ func (s) TestServeBootstrapConfigInvalid(t *testing.T) { // verifies that Server() exits with a non-nil error. func (s) TestServeNewClientFailure(t *testing.T) { origNewXDSClient := newXDSClient - newXDSClient = func() (xdsClientInterface, error) { - return nil, errors.New("xdsClient creation failed") + newXDSClient = func() (xdsclient.XDSClient, func(), error) { + return nil, nil, errors.New("xdsClient creation failed") } defer func() { newXDSClient = origNewXDSClient }() server := NewGRPCServer() defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } serveDone := testutils.NewChannel() @@ -584,15 +717,26 @@ func (s) TestServeNewClientFailure(t *testing.T) { // server is not configured with xDS credentials. Verifies that the security // config received as part of a Listener update is not acted upon. 
func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { - fs, clientCh, cleanup := setupOverrides() + fs, clientCh, cleanup := setupOverrides(t) defer cleanup() - server := NewGRPCServer() + // Create a server option to get notified about serving mode changes. We don't + // do anything other than throwing a log entry here. But this is required, + // since the server code emits a log entry at the default level (which is + // ERROR) if no callback is registered for serving mode changes. Our + // testLogger fails the test if there is any log entry at ERROR level. It does + // provide an ExpectError() method, but that takes a string and it would be + // painful to construct the exact error message expected here. Instead this + // works just fine. + modeChangeOpt := ServingModeCallback(func(addr net.Addr, args ServingModeChangeArgs) { + t.Logf("Serving mode for listener %q changed to %q, err: %v", addr.String(), args.Mode, args.Err) + }) + server := NewGRPCServer(modeChangeOpt) defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } // Call Serve() in a goroutine, and push on a channel when Serve returns. @@ -626,22 +770,58 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { // Push a good LDS response with security config, and wait for Serve() to be // invoked on the underlying grpc.Server. Also make sure that certificate // providers are not created. 
- addr, port := splitHostPort(lis.Addr().String()) - client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ - Address: addr, - Port: port, - FilterChains: []*xdsclient.FilterChain{ - { - SecurityCfg: &xdsclient.SecurityConfig{ - RootInstanceName: "default1", - IdentityInstanceName: "default2", - RequireClientCert: true, + fcm, err := xdsresource.NewFilterChainManager(&v3listenerpb.Listener{ + FilterChains: []*v3listenerpb.FilterChain{ + { + TransportSocket: &v3corepb.TransportSocket{ + Name: "envoy.transport_sockets.tls", + ConfigType: &v3corepb.TransportSocket_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3tlspb.DownstreamTlsContext{ + CommonTlsContext: &v3tlspb.CommonTlsContext{ + TlsCertificateCertificateProviderInstance: &v3tlspb.CommonTlsContext_CertificateProviderInstance{ + InstanceName: "identityPluginInstance", + CertificateName: "identityCertName", + }, + }, + }), + }, + }, + Filters: []*v3listenerpb.Filter{ + { + Name: "filter-1", + ConfigType: &v3listenerpb.Filter_TypedConfig{ + TypedConfig: testutils.MarshalAny(&v3httppb.HttpConnectionManager{ + RouteSpecifier: &v3httppb.HttpConnectionManager_RouteConfig{ + RouteConfig: &v3routepb.RouteConfiguration{ + Name: "routeName", + VirtualHosts: []*v3routepb.VirtualHost{{ + Domains: []string{"lds.target.good:3333"}, + Routes: []*v3routepb.Route{{ + Match: &v3routepb.RouteMatch{ + PathSpecifier: &v3routepb.RouteMatch_Prefix{Prefix: "/"}, + }, + Action: &v3routepb.Route_NonForwardingAction{}, + }}}}}, + }, + HttpFilters: []*v3httppb.HttpFilter{e2e.RouterHTTPFilter}, + }), + }, }, }, }, }, + }) + if err != nil { + t.Fatalf("xdsclient.NewFilterChainManager() failed with error: %v", err) + } + addr, port := splitHostPort(lis.Addr().String()) + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{ + RouteConfigName: "routeconfig", + InboundListenerCfg: &xdsresource.InboundListenerConfig{ + Address: addr, + 
Port: port, + FilterChains: fcm, + }, }, nil) if _, err := fs.serveCh.Receive(ctx); err != nil { t.Fatalf("error when waiting for Serve() to be invoked on the grpc.Server") @@ -657,7 +837,7 @@ func (s) TestHandleListenerUpdate_NoXDSCreds(t *testing.T) { // server is configured with xDS credentials, but receives a Listener update // with an error. Verifies that no certificate providers are created. func (s) TestHandleListenerUpdate_ErrorUpdate(t *testing.T) { - clientCh, cleanup := setupOverridesForXDSCreds(true) + clientCh, cleanup := setupOverridesForXDSCreds(t, true) defer cleanup() xdsCreds, err := xds.NewServerCredentials(xds.ServerOptions{FallbackCreds: insecure.NewCredentials()}) @@ -668,9 +848,9 @@ func (s) TestHandleListenerUpdate_ErrorUpdate(t *testing.T) { server := NewGRPCServer(grpc.Creds(xdsCreds)) defer server.Stop() - lis, err := xdstestutils.LocalTCPListener() + lis, err := testutils.LocalTCPListener() if err != nil { - t.Fatalf("xdstestutils.LocalTCPListener() failed: %v", err) + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) } // Call Serve() in a goroutine, and push on a channel when Serve returns. @@ -703,20 +883,7 @@ func (s) TestHandleListenerUpdate_ErrorUpdate(t *testing.T) { // Push an error to the registered listener watch callback and make sure // that Serve does not return. 
- client.InvokeWatchListenerCallback(xdsclient.ListenerUpdate{ - RouteConfigName: "routeconfig", - InboundListenerCfg: &xdsclient.InboundListenerConfig{ - FilterChains: []*xdsclient.FilterChain{ - { - SecurityCfg: &xdsclient.SecurityConfig{ - RootInstanceName: "default1", - IdentityInstanceName: "default2", - RequireClientCert: true, - }, - }, - }, - }, - }, errors.New("LDS error")) + client.InvokeWatchListenerCallback(xdsresource.ListenerUpdate{}, errors.New("LDS error")) sCtx, sCancel := context.WithTimeout(context.Background(), defaultTestShortTimeout) defer sCancel() if _, err := serveDone.Receive(sCtx); err != context.DeadlineExceeded { @@ -742,3 +909,50 @@ func verifyCertProviderNotCreated() error { } return nil } + +// TestServeReturnsErrorAfterClose tests that the xds Server returns +// grpc.ErrServerStopped if Serve is called after Close on the server. +func (s) TestServeReturnsErrorAfterClose(t *testing.T) { + cancel := setupClientOverride(t) + defer cancel() + server := NewGRPCServer() + + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + + server.Stop() + err = server.Serve(lis) + if err == nil || !strings.Contains(err.Error(), grpc.ErrServerStopped.Error()) { + t.Fatalf("server erorred with wrong error, want: %v, got :%v", grpc.ErrServerStopped, err) + } +} + +// TestServeAndCloseDoNotRace tests that Serve and Close on the xDS Server do +// not race and leak the xDS Client. A leak would be found by the leak checker. 
+func (s) TestServeAndCloseDoNotRace(t *testing.T) { + cleanup := setupClientOverride(t) + defer cleanup() + + lis, err := testutils.LocalTCPListener() + if err != nil { + t.Fatalf("testutils.LocalTCPListener() failed: %v", err) + } + + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + server := NewGRPCServer() + wg.Add(1) + go func() { + server.Serve(lis) + wg.Done() + }() + wg.Add(1) + go func() { + server.Stop() + wg.Done() + }() + } + wg.Wait() +} diff --git a/xds/xds.go b/xds/xds.go index 562c5aa82abc..bd6ed9c90f13 100644 --- a/xds/xds.go +++ b/xds/xds.go @@ -30,17 +30,22 @@ package xds import ( "fmt" - v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" "google.golang.org/grpc" + "google.golang.org/grpc/internal" internaladmin "google.golang.org/grpc/internal/admin" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/xds/csds" - _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. - _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. - _ "google.golang.org/grpc/xds/internal/client/v2" // Register the v2 xDS API client. - _ "google.golang.org/grpc/xds/internal/client/v3" // Register the v3 xDS API client. - _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. - _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/credentials/tls/certprovider/pemfile" // Register the file watcher certificate provider plugin. + _ "google.golang.org/grpc/xds/internal/balancer" // Register the balancers. + _ "google.golang.org/grpc/xds/internal/clusterspecifier/rls" // Register the RLS cluster specifier plugin. Note that this does not register the RLS LB policy. + _ "google.golang.org/grpc/xds/internal/httpfilter/fault" // Register the fault injection filter. 
+ _ "google.golang.org/grpc/xds/internal/httpfilter/rbac" // Register the RBAC filter. + _ "google.golang.org/grpc/xds/internal/httpfilter/router" // Register the router filter. + _ "google.golang.org/grpc/xds/internal/resolver" // Register the xds_resolver. + _ "google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter" // Register the xDS LB Registry Converters. + + v3statusgrpc "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) func init() { @@ -52,14 +57,14 @@ func init() { case *GRPCServer: sss, ok := ss.gs.(*grpc.Server) if !ok { - logger.Warningf("grpc server within xds.GRPCServer is not *grpc.Server, CSDS will not be registered") + logger.Warning("grpc server within xds.GRPCServer is not *grpc.Server, CSDS will not be registered") return nil, nil } grpcServer = sss default: // Returning an error would cause the top level admin.Register() to // fail. Log a warning instead. - logger.Warningf("server to register service on is neither a *grpc.Server or a *xds.GRPCServer, CSDS will not be registered") + logger.Error("Server to register service on is neither a *grpc.Server or a *xds.GRPCServer, CSDS will not be registered") return nil, nil } @@ -71,3 +76,21 @@ func init() { return csdss.Close, nil }) } + +// NewXDSResolverWithConfigForTesting creates a new xDS resolver builder using +// the provided xDS bootstrap config instead of the global configuration from +// the supported environment variables. The resolver.Builder is meant to be +// used in conjunction with the grpc.WithResolvers DialOption. +// +// # Testing Only +// +// This function should ONLY be used for testing and may not work with some +// other features, including the CSDS service. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func NewXDSResolverWithConfigForTesting(bootstrapConfig []byte) (resolver.Builder, error) { + return internal.NewXDSResolverWithConfigForTesting.(func([]byte) (resolver.Builder, error))(bootstrapConfig) +}